// InlineSpiller.cpp -- snapshot at revision 7b1f498a7648b790929a4f97fd82228aa7ac7bea
//===-------- InlineSpiller.cpp - Insert spills and restores inline -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "regalloc"
#include "Spiller.h"
#include "LiveRangeEdit.h"
#include "VirtRegMap.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Debugging aid: when enabled, run the machine verifier after each spill or
// split so liveness corruption is detected as soon as it is introduced.
static cl::opt<bool>
VerifySpills("verify-spills", cl::desc("Verify after each spill/split"));

namespace {
/// InlineSpiller - Spiller implementation that rewrites the machine function
/// in place: it inserts reload/spill instructions around each use/def of the
/// register being spilled, attempting rematerialization, stack-access
/// coalescing, and memory-operand folding first to avoid unnecessary memory
/// traffic.
class InlineSpiller : public Spiller {
  MachineFunctionPass &pass_;
  MachineFunction &mf_;
  LiveIntervals &lis_;
  LiveStacks &lss_;
  AliasAnalysis *aa_;
  VirtRegMap &vrm_;
  MachineFrameInfo &mfi_;
  MachineRegisterInfo &mri_;
  const TargetInstrInfo &tii_;
  const TargetRegisterInfo &tri_;
  const BitVector reserved_;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveRangeEdit *edit_;           // Live range currently being spilled.
  const TargetRegisterClass *rc_; // Register class of the spilled register.
  int stackSlot_;                 // Frame index assigned for the spill.

  // Values that failed to remat at some point.
  SmallPtrSet<VNInfo*, 8> usedValues_;

  // Private dtor: instances are only destroyed through the Spiller interface.
  ~InlineSpiller() {}

public:
  InlineSpiller(MachineFunctionPass &pass,
                MachineFunction &mf,
                VirtRegMap &vrm)
    : pass_(pass),
      mf_(mf),
      lis_(pass.getAnalysis<LiveIntervals>()),
      lss_(pass.getAnalysis<LiveStacks>()),
      aa_(&pass.getAnalysis<AliasAnalysis>()),
      vrm_(vrm),
      mfi_(*mf.getFrameInfo()),
      mri_(mf.getRegInfo()),
      tii_(*mf.getTarget().getInstrInfo()),
      tri_(*mf.getTarget().getRegisterInfo()),
      reserved_(tri_.getReservedRegs(mf_)) {}

  void spill(LiveInterval *li,
             SmallVectorImpl<LiveInterval*> &newIntervals,
             const SmallVectorImpl<LiveInterval*> &spillIs);

  void spill(LiveRangeEdit &);

private:
  bool reMaterializeFor(MachineBasicBlock::iterator MI);
  void reMaterializeAll();

  bool coalesceStackAccess(MachineInstr *MI);
  bool foldMemoryOperand(MachineBasicBlock::iterator MI,
                         const SmallVectorImpl<unsigned> &Ops,
                         MachineInstr *LoadMI = 0);
  void insertReload(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
  void insertSpill(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
};
}

namespace llvm {
/// createInlineSpiller - Factory for the inline spiller; optionally verifies
/// the machine function first when -verify-spills is set.
Spiller *createInlineSpiller(MachineFunctionPass &pass,
                             MachineFunction &mf,
                             VirtRegMap &vrm) {
  if (VerifySpills)
    mf.verify(&pass, "When creating inline spiller");
  return new InlineSpiller(pass, mf, vrm);
}
}

/// reMaterializeFor - Attempt to rematerialize edit_->getReg() before MI
/// instead of reloading it.
bool InlineSpiller::reMaterializeFor(MachineBasicBlock::iterator MI) {
  SlotIndex UseIdx = lis_.getInstructionIndex(MI).getUseIndex();
  VNInfo *OrigVNI = edit_->getParent().getVNInfoAt(UseIdx);

  // No value live at this use: the register is read before any reaching def.
  // Mark all matching use operands <undef> so no reload is required.
  if (!OrigVNI) {
    DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() && MO.getReg() == edit_->getReg())
        MO.setIsUndef();
    }
    DEBUG(dbgs() << UseIdx << '\t' << *MI);
    return true;
  }

  // Ask LiveRangeEdit whether OrigVNI's defining instruction can legally be
  // re-executed at UseIdx. On failure remember the value in usedValues_ so
  // its original def is kept alive by reMaterializeAll().
  LiveRangeEdit::Remat RM(OrigVNI);
  if (!edit_->canRematerializeAt(RM, UseIdx, false, lis_)) {
    usedValues_.insert(OrigVNI);
    DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
    return false;
  }

  // If the instruction also writes edit_->getReg(), it had better not require
  // the same register for uses and defs.
  bool Reads, Writes;
  SmallVector<unsigned, 8> Ops;
  tie(Reads, Writes) = MI->readsWritesVirtualRegister(edit_->getReg(), &Ops);
  if (Writes) {
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      // Reject uses tied to a def (same register) and subreg defs.
      if (MO.isUse() ? MI->isRegTiedToDefOperand(Ops[i]) : MO.getSubReg()) {
        usedValues_.insert(OrigVNI);
        DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
        return false;
      }
    }
  }

  // Before rematerializing into a register for a single instruction, try to
  // fold a load into the instruction. That avoids allocating a new register.
  if (RM.OrigMI->getDesc().canFoldAsLoad() &&
      foldMemoryOperand(MI, Ops, RM.OrigMI)) {
    edit_->markRematerialized(RM.ParentVNI);
    return true;
  }

  // Allocate a new register for the remat.
  LiveInterval &NewLI = edit_->create(mri_, lis_, vrm_);
  NewLI.markNotSpillable();

  // Finally we can rematerialize OrigMI before MI.
  SlotIndex DefIdx = edit_->rematerializeAt(*MI->getParent(), MI, NewLI.reg, RM,
                                            lis_, tii_, tri_);
  DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
               << *lis_.getInstructionFromIndex(DefIdx));

  // Replace operands
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(Ops[i]);
    if (MO.isReg() && MO.isUse() && MO.getReg() == edit_->getReg()) {
      MO.setReg(NewLI.reg);
      MO.setIsKill();
    }
  }
  DEBUG(dbgs() << "\t " << UseIdx << '\t' << *MI);

  // Give the new interval a single live range covering remat def -> use.
  VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI));
  DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  return true;
}

/// reMaterializeAll - Try to rematerialize as many uses as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  // Do a quick scan of the interval values to find if any are remattable.
  if (!edit_->anyRematerializable(lis_, tii_, aa_))
    return;

  usedValues_.clear();

  // Try to remat before all uses of edit_->getReg().
  // skipInstruction() advances the iterator past MI before MI is handed out,
  // so reMaterializeFor may rewrite MI's operands without invalidating RI.
  bool anyRemat = false;
  for (MachineRegisterInfo::use_nodbg_iterator
       RI = mri_.use_nodbg_begin(edit_->getReg());
       MachineInstr *MI = RI.skipInstruction();)
    anyRemat |= reMaterializeFor(MI);

  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  bool anyRemoved = false;
  for (LiveInterval::vni_iterator I = edit_->getParent().vni_begin(),
       E = edit_->getParent().vni_end(); I != E; ++I) {
    VNInfo *VNI = *I;
    // Keep values that reach a PHI, were never rematted, or still have a
    // remaining use recorded in usedValues_.
    if (VNI->hasPHIKill() || !edit_->didRematerialize(VNI) ||
        usedValues_.count(VNI))
      continue;
    MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
    DEBUG(dbgs() << "\tremoving dead def: " << VNI->def << '\t' << *DefMI);
    // Unhook from all maps before physically erasing the instruction.
    lis_.RemoveMachineInstrFromMaps(DefMI);
    vrm_.RemoveMachineInstrFromMaps(DefMI);
    DefMI->eraseFromParent();
    VNI->def = SlotIndex();
    anyRemoved = true;
  }

  if (!anyRemoved)
    return;

  // Removing values may cause debug uses where parent is not live.
  for (MachineRegisterInfo::use_iterator RI = mri_.use_begin(edit_->getReg());
       MachineInstr *MI = RI.skipInstruction();) {
    if (!MI->isDebugValue())
      continue;
    // Try to preserve the debug value if parent is live immediately after it.
    MachineBasicBlock::iterator NextMI = MI;
    ++NextMI;
    if (NextMI != MI->getParent()->end() && !lis_.isNotInMIMap(NextMI)) {
      SlotIndex Idx = lis_.getInstructionIndex(NextMI);
      VNInfo *VNI = edit_->getParent().getVNInfoAt(Idx);
      if (VNI && (VNI->hasPHIKill() || usedValues_.count(VNI)))
        continue;
    }
    DEBUG(dbgs() << "Removing debug info due to remat:" << "\t" << *MI);
    MI->eraseFromParent();
  }
}

/// If MI is a load or store of stackSlot_, it can be removed.
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI) {
  int FI = 0;
  unsigned reg;
  // The target hooks return the accessed register (non-zero) and set FI when
  // MI is a simple stack-slot load/store.
  if (!(reg = tii_.isLoadFromStackSlot(MI, FI)) &&
      !(reg = tii_.isStoreToStackSlot(MI, FI)))
    return false;

  // We have a stack access. Is it the right register and slot?
  if (reg != edit_->getReg() || FI != stackSlot_)
    return false;

  // Reloading/spilling the register from/to its own spill slot is a no-op.
  DEBUG(dbgs() << "Coalescing stack access: " << *MI);
  lis_.RemoveMachineInstrFromMaps(MI);
  MI->eraseFromParent();
  return true;
}

/// foldMemoryOperand - Try folding stack slot references in Ops into MI.
/// @param MI     Instruction using or defining the current register.
/// @param Ops    Operand indices from readsWritesVirtualRegister().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
/// @return       True on success, and MI will be erased.
bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                      const SmallVectorImpl<unsigned> &Ops,
                                      MachineInstr *LoadMI) {
  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned Idx = Ops[i];
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit())
      continue;
    // FIXME: Teach targets to deal with subregs.
    if (MO.getSubReg())
      return false;
    // We cannot fold a load instruction into a def.
    if (LoadMI && MO.isDef())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  // Fold either the remat load (LoadMI) or a reference to the spill slot.
  MachineInstr *FoldMI =
    LoadMI ? tii_.foldMemoryOperand(MI, FoldOps, LoadMI)
           : tii_.foldMemoryOperand(MI, FoldOps, stackSlot_);
  if (!FoldMI)
    return false;
  lis_.ReplaceMachineInstrInMaps(MI, FoldMI);
  if (!LoadMI)
    vrm_.addSpillSlotUse(stackSlot_, FoldMI);
  MI->eraseFromParent();
  DEBUG(dbgs() << "\tfolded: " << *FoldMI);
  return true;
}

/// insertReload - Insert a reload of NewLI.reg before MI.
void InlineSpiller::insertReload(LiveInterval &NewLI,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  // The load is inserted immediately before MI; back up to index it.
  tii_.loadRegFromStackSlot(MBB, MI, NewLI.reg, stackSlot_, rc_, &tri_);
  --MI; // Point to load instruction.
  SlotIndex LoadIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\treload: " << LoadIdx << '\t' << *MI);
  // NewLI is live from the reload's def to MI's def index.
  VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0,
                                       lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI));
}

/// insertSpill - Insert a spill of NewLI.reg after MI.
void InlineSpiller::insertSpill(LiveInterval &NewLI,
                                MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();

  // Get the defined value. It could be an early clobber so keep the def index.
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  VNInfo *VNI = edit_->getParent().getVNInfoAt(Idx);
  assert(VNI && VNI->def.getDefIndex() == Idx && "Inconsistent VNInfo");
  Idx = VNI->def;

  // Insert the store after MI (++MI), then back up to index the new store.
  tii_.storeRegToStackSlot(MBB, ++MI, NewLI.reg, true, stackSlot_, rc_, &tri_);
  --MI; // Point to store instruction.
  SlotIndex StoreIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\tspilled: " << StoreIdx << '\t' << *MI);
  // NewLI is live from the def at Idx until the store consumes it.
  VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(Idx, StoreIdx, StoreVNI));
}

/// spill - Spiller interface entry point: wraps li in a LiveRangeEdit and
/// delegates to spill(LiveRangeEdit&), optionally verifying afterwards.
void InlineSpiller::spill(LiveInterval *li,
                          SmallVectorImpl<LiveInterval*> &newIntervals,
                          const SmallVectorImpl<LiveInterval*> &spillIs) {
  LiveRangeEdit edit(*li, newIntervals, spillIs);
  spill(edit);
  if (VerifySpills)
    mf_.verify(&pass_, "After inline spill");
}

/// spill - Spill everything in the given LiveRangeEdit: rematerialize what we
/// can, assign a stack slot, and rewrite every remaining use/def with reloads
/// and spills around a short-lived new register.
void InlineSpiller::spill(LiveRangeEdit &edit) {
  edit_ = &edit;
  assert(!TargetRegisterInfo::isStackSlot(edit.getReg())
         && "Trying to spill a stack slot.");
  DEBUG(dbgs() << "Inline spilling "
               << mri_.getRegClass(edit.getReg())->getName()
               << ':' << edit.getParent() << "\n");
  assert(edit.getParent().isSpillable() &&
         "Attempting to spill already spilled value.");

  reMaterializeAll();

  // Remat may handle everything.
  if (edit_->getParent().empty())
    return;

  rc_ = mri_.getRegClass(edit.getReg());
  stackSlot_ = vrm_.assignVirt2StackSlot(edit_->getReg());

  // Update LiveStacks now that we are committed to spilling.
  // The slot interval mirrors the spilled register's ranges with one value.
  LiveInterval &stacklvr = lss_.getOrCreateInterval(stackSlot_, rc_);
  assert(stacklvr.empty() && "Just created stack slot not empty");
  stacklvr.getNextValue(SlotIndex(), 0, lss_.getVNInfoAllocator());
  stacklvr.MergeRangesInAsValue(edit_->getParent(), stacklvr.getValNumInfo(0));

  // Iterate over instructions using register.
  // skipInstruction() advances RI past MI first, so MI may be erased below.
  for (MachineRegisterInfo::reg_iterator RI = mri_.reg_begin(edit.getReg());
       MachineInstr *MI = RI.skipInstruction();) {

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      uint64_t Offset = MI->getOperand(1).getImm();
      const MDNode *MDPtr = MI->getOperand(2).getMetadata();
      DebugLoc DL = MI->getDebugLoc();
      if (MachineInstr *NewDV = tii_.emitFrameIndexDebugValue(mf_, stackSlot_,
                                                              Offset, MDPtr, DL)) {
        DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
        MachineBasicBlock *MBB = MI->getParent();
        MBB->insert(MBB->erase(MI), NewDV);
      } else {
        // Target can't describe a frame-index location; drop the debug info.
        DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
        MI->eraseFromParent();
      }
      continue;
    }

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(MI))
      continue;

    // Analyze instruction.
    bool Reads, Writes;
    SmallVector<unsigned, 8> Ops;
    tie(Reads, Writes) = MI->readsWritesVirtualRegister(edit.getReg(), &Ops);

    // Attempt to fold memory ops.
    if (foldMemoryOperand(MI, Ops))
      continue;

    // Allocate interval around instruction.
    // FIXME: Infer regclass from instruction alone.
    LiveInterval &NewLI = edit.create(mri_, lis_, vrm_);
    NewLI.markNotSpillable();

    if (Reads)
      insertReload(NewLI, MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      MO.setReg(NewLI.reg);
      if (MO.isUse()) {
        // Tied uses are killed by the tied def, not here.
        if (!MI->isRegTiedToDefOperand(Ops[i]))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (Writes && hasLiveDef)
      insertSpill(NewLI, MI);

    DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  }
}