// InlineSpiller.cpp revision 7d57753df3bbad14e3e3ddf396b4edbdc4c55d7d
//===-------- InlineSpiller.cpp - Insert spills and restores inline -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "spiller"
#include "Spiller.h"
#include "LiveRangeEdit.h"
#include "SplitKit.h"
#include "VirtRegMap.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Debugging aid: run the machine verifier after every spill or split so that
// a broken invariant is caught close to the transformation that introduced it.
static cl::opt<bool>
VerifySpills("verify-spills", cl::desc("Verify after each spill/split"));

namespace {
/// InlineSpiller - A Spiller implementation that rewrites the machine function
/// in place: it tries live-range splitting and rematerialization first, and
/// only falls back to inserting explicit reload/spill instructions around
/// each remaining use of the register being spilled.
class InlineSpiller : public Spiller {
  MachineFunctionPass &pass_;
  MachineFunction &mf_;
  LiveIntervals &lis_;
  LiveStacks &lss_;
  MachineDominatorTree &mdt_;
  MachineLoopInfo &loops_;
  VirtRegMap &vrm_;
  MachineFrameInfo &mfi_;
  MachineRegisterInfo &mri_;
  const TargetInstrInfo &tii_;
  const TargetRegisterInfo &tri_;
  const BitVector reserved_;

  SplitAnalysis splitAnalysis_;

  // Variables that are valid during spill(), but used by multiple methods.
  // edit_ describes the live range currently being spilled; rc_ and
  // stackSlot_ are only meaningful once spill() has committed to spilling.
  LiveRangeEdit *edit_;
  const TargetRegisterClass *rc_;
  int stackSlot_;

  // Values that failed to remat at some point.  reMaterializeAll() consults
  // this set so it never deletes the original def of a value that still has
  // a non-rematerialized use.
  SmallPtrSet<VNInfo*, 8> usedValues_;

  // Private destructor: instances are created through createInlineSpiller()
  // and destroyed through the Spiller base interface.
  ~InlineSpiller() {}

public:
  /// InlineSpiller - Capture references to all the analyses and target hooks
  /// this spiller needs; the pass must have required them beforehand.
  InlineSpiller(MachineFunctionPass &pass,
                MachineFunction &mf,
                VirtRegMap &vrm)
    : pass_(pass),
      mf_(mf),
      lis_(pass.getAnalysis<LiveIntervals>()),
      lss_(pass.getAnalysis<LiveStacks>()),
      mdt_(pass.getAnalysis<MachineDominatorTree>()),
      loops_(pass.getAnalysis<MachineLoopInfo>()),
      vrm_(vrm),
      mfi_(*mf.getFrameInfo()),
      mri_(mf.getRegInfo()),
      tii_(*mf.getTarget().getInstrInfo()),
      tri_(*mf.getTarget().getRegisterInfo()),
      reserved_(tri_.getReservedRegs(mf_)),
      splitAnalysis_(mf, lis_, loops_) {}

  /// spill - Spiller interface entry point; wraps li in a LiveRangeEdit and
  /// forwards to spill(LiveRangeEdit&).  New intervals created while
  /// spilling are appended to newIntervals.
  void spill(LiveInterval *li,
             SmallVectorImpl<LiveInterval*> &newIntervals,
             SmallVectorImpl<LiveInterval*> &spillIs);

  void spill(LiveRangeEdit &);

private:
  bool split();

  bool reMaterializeFor(MachineBasicBlock::iterator MI);
  void reMaterializeAll();

  bool coalesceStackAccess(MachineInstr *MI);
  bool foldMemoryOperand(MachineBasicBlock::iterator MI,
                         const SmallVectorImpl<unsigned> &Ops);
  void insertReload(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
  void insertSpill(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
};
}

namespace llvm {
/// createInlineSpiller - Factory for the inline spiller.  Optionally runs the
/// machine verifier first (-verify-spills) so that pre-existing breakage is
/// not blamed on the spiller.
Spiller *createInlineSpiller(MachineFunctionPass &pass,
                             MachineFunction &mf,
                             VirtRegMap &vrm) {
  if (VerifySpills)
    mf.verify(&pass);
  return new InlineSpiller(pass, mf, vrm);
}
}

/// split - try splitting the current interval into pieces that may allocate
/// separately. Return true if successful.
bool InlineSpiller::split() {
  splitAnalysis_.analyze(&edit_->getParent());

  // Try splitting around loops.
  if (const MachineLoop *loop = splitAnalysis_.getBestSplitLoop()) {
    SplitEditor(splitAnalysis_, lis_, vrm_, mdt_, *edit_)
      .splitAroundLoop(loop);
    return true;
  }

  // Try splitting into single block intervals.
  SplitAnalysis::BlockPtrSet blocks;
  if (splitAnalysis_.getMultiUseBlocks(blocks)) {
    SplitEditor(splitAnalysis_, lis_, vrm_, mdt_, *edit_)
      .splitSingleBlocks(blocks);
    return true;
  }

  // Try splitting inside a basic block.
  if (const MachineBasicBlock *MBB = splitAnalysis_.getBlockForInsideSplit()) {
    SplitEditor(splitAnalysis_, lis_, vrm_, mdt_, *edit_)
      .splitInsideBlock(MBB);
    return true;
  }

  return false;
}

/// reMaterializeFor - Attempt to rematerialize edit_->getReg() before MI instead of
/// reloading it.  Returns true when MI no longer needs a reload: either the
/// value was rematted into a fresh virtual register, or the use was dead and
/// was marked <undef>.  Returns false (and records OrigVNI in usedValues_)
/// when MI must be handled by a reload instead.
bool InlineSpiller::reMaterializeFor(MachineBasicBlock::iterator MI) {
  SlotIndex UseIdx = lis_.getInstructionIndex(MI).getUseIndex();
  VNInfo *OrigVNI = edit_->getParent().getVNInfoAt(UseIdx);

  // No live value at the use point: the operand reads an undefined value, so
  // flag it <undef> rather than inserting a reload of garbage.
  if (!OrigVNI) {
    DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() && MO.getReg() == edit_->getReg())
        MO.setIsUndef();
    }
    DEBUG(dbgs() << UseIdx << '\t' << *MI);
    return true;
  }

  LiveRangeEdit::Remat RM = edit_->canRematerializeAt(OrigVNI, UseIdx, false,
                                                      lis_);
  if (!RM) {
    usedValues_.insert(OrigVNI);
    DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
    return false;
  }

  // If the instruction also writes edit_->getReg(), it had better not require
  // the same register for uses and defs.
  bool Reads, Writes;
  SmallVector<unsigned, 8> Ops;
  tie(Reads, Writes) = MI->readsWritesVirtualRegister(edit_->getReg(), &Ops);
  if (Writes) {
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      // A tied use or a sub-register def would force the remat register and
      // the original register to coincide, which we cannot do here.
      if (MO.isUse() ? MI->isRegTiedToDefOperand(Ops[i]) : MO.getSubReg()) {
        usedValues_.insert(OrigVNI);
        DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
        return false;
      }
    }
  }

  // Allocate a new register for the remat.
  LiveInterval &NewLI = edit_->create(mri_, lis_, vrm_);
  NewLI.markNotSpillable();

  // Finally we can rematerialize OrigMI before MI.
  SlotIndex DefIdx = edit_->rematerializeAt(*MI->getParent(), MI, NewLI.reg, RM,
                                            lis_, tii_, tri_);
  DEBUG(dbgs() << "\tremat: " << DefIdx << '\n');

  // Replace operands: every use of the old register in MI now reads the
  // rematted value, which dies here.
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(Ops[i]);
    if (MO.isReg() && MO.isUse() && MO.getReg() == edit_->getReg()) {
      MO.setReg(NewLI.reg);
      MO.setIsKill();
    }
  }
  DEBUG(dbgs() << "\t " << UseIdx << '\t' << *MI);

  // The new interval covers exactly the span from the remat def to this use.
  VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI));
  DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  return true;
}

/// reMaterializeAll - Try to rematerialize as many uses as possible,
/// and trim the live ranges after.  Original defs whose every use was
/// rematted (and that are not kept live by PHIs or usedValues_) are deleted,
/// along with DBG_VALUEs left pointing at a no-longer-live parent value.
void InlineSpiller::reMaterializeAll() {
  // Do a quick scan of the interval values to find if any are remattable.
  if (!edit_->anyRematerializable(lis_, tii_, 0))
    return;

  usedValues_.clear();

  // Try to remat before all uses of edit_->getReg().
  // skipInstruction() advances the iterator before the use is rewritten, so
  // rewriting MI's operands does not invalidate the walk.
  bool anyRemat = false;
  for (MachineRegisterInfo::use_nodbg_iterator
       RI = mri_.use_nodbg_begin(edit_->getReg());
       MachineInstr *MI = RI.skipInstruction();)
    anyRemat |= reMaterializeFor(MI);

  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  bool anyRemoved = false;
  for (LiveInterval::vni_iterator I = edit_->getParent().vni_begin(),
       E = edit_->getParent().vni_end(); I != E; ++I) {
    VNInfo *VNI = *I;
    // Keep the def if the value reaches a PHI, failed remat somewhere, or
    // still has a use that was not rematted.
    if (VNI->hasPHIKill() || !edit_->didRematerialize(VNI) ||
        usedValues_.count(VNI))
      continue;
    MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
    DEBUG(dbgs() << "\tremoving dead def: " << VNI->def << '\t' << *DefMI);
    lis_.RemoveMachineInstrFromMaps(DefMI);
    vrm_.RemoveMachineInstrFromMaps(DefMI);
    DefMI->eraseFromParent();
    // Mark the value as defless; the instruction defining it is gone.
    VNI->def = SlotIndex();
    anyRemoved = true;
  }

  if (!anyRemoved)
    return;

  // Removing values may cause debug uses where parent is not live.
  for (MachineRegisterInfo::use_iterator RI = mri_.use_begin(edit_->getReg());
       MachineInstr *MI = RI.skipInstruction();) {
    if (!MI->isDebugValue())
      continue;
    // Try to preserve the debug value if parent is live immediately after it.
    MachineBasicBlock::iterator NextMI = MI;
    ++NextMI;
    if (NextMI != MI->getParent()->end() && !lis_.isNotInMIMap(NextMI)) {
      SlotIndex Idx = lis_.getInstructionIndex(NextMI);
      VNInfo *VNI = edit_->getParent().getVNInfoAt(Idx);
      if (VNI && (VNI->hasPHIKill() || usedValues_.count(VNI)))
        continue;
    }
    DEBUG(dbgs() << "Removing debug info due to remat:" << "\t" << *MI);
    MI->eraseFromParent();
  }
}

/// If MI is a load or store of stackSlot_, it can be removed.
/// Such an access is redundant once the register lives in the slot anyway.
/// Returns true if MI was erased.
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI) {
  int FI = 0;
  unsigned reg;
  if (!(reg = tii_.isLoadFromStackSlot(MI, FI)) &&
      !(reg = tii_.isStoreToStackSlot(MI, FI)))
    return false;

  // We have a stack access. Is it the right register and slot?
  if (reg != edit_->getReg() || FI != stackSlot_)
    return false;

  DEBUG(dbgs() << "Coalescing stack access: " << *MI);
  lis_.RemoveMachineInstrFromMaps(MI);
  MI->eraseFromParent();
  return true;
}

/// foldMemoryOperand - Try folding stack slot references in Ops into MI.
/// Return true on success, and MI will be erased.
bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                      const SmallVectorImpl<unsigned> &Ops) {
  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned Idx = Ops[i];
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit())
      continue;
    // FIXME: Teach targets to deal with subregs.
    if (MO.getSubReg())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  MachineInstr *FoldMI = tii_.foldMemoryOperand(MI, FoldOps, stackSlot_);
  if (!FoldMI)
    return false;
  // The folded instruction replaces MI at the same slot index.
  lis_.ReplaceMachineInstrInMaps(MI, FoldMI);
  vrm_.addSpillSlotUse(stackSlot_, FoldMI);
  MI->eraseFromParent();
  DEBUG(dbgs() << "\tfolded: " << *FoldMI);
  return true;
}

/// insertReload - Insert a reload of NewLI.reg before MI, and extend NewLI
/// to cover the new load up to the use in MI.
void InlineSpiller::insertReload(LiveInterval &NewLI,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  tii_.loadRegFromStackSlot(MBB, MI, NewLI.reg, stackSlot_, rc_, &tri_);
  --MI; // Point to load instruction.
  SlotIndex LoadIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\treload: " << LoadIdx << '\t' << *MI);
  VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0,
                                       lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI));
}

/// insertSpill - Insert a spill of NewLI.reg after MI, and extend NewLI
/// from the def in MI down to the new store.
void InlineSpiller::insertSpill(LiveInterval &NewLI,
                                MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  tii_.storeRegToStackSlot(MBB, ++MI, NewLI.reg, true, stackSlot_, rc_, &tri_);
  --MI; // Point to store instruction.
  SlotIndex StoreIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\tspilled: " << StoreIdx << '\t' << *MI);
  VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(Idx, StoreIdx, StoreVNI));
}

/// spill - Spiller interface entry point.  Wraps li in a LiveRangeEdit,
/// performs the spill, then optionally re-verifies the function
/// (-verify-spills).
void InlineSpiller::spill(LiveInterval *li,
                          SmallVectorImpl<LiveInterval*> &newIntervals,
                          SmallVectorImpl<LiveInterval*> &spillIs) {
  LiveRangeEdit edit(*li, newIntervals, spillIs);
  spill(edit);
  if (VerifySpills)
    mf_.verify(&pass_);
}

/// spill - Main driver.  Tries splitting, then rematerialization; if the
/// interval still has uses, assigns a stack slot and rewrites every
/// remaining instruction with a reload before and/or a spill after it.
void InlineSpiller::spill(LiveRangeEdit &edit) {
  edit_ = &edit;
  assert(!edit.getParent().isStackSlot() && "Trying to spill a stack slot.");
  DEBUG(dbgs() << "Inline spilling "
               << mri_.getRegClass(edit.getReg())->getName()
               << ':' << edit.getParent() << "\n");
  assert(edit.getParent().isSpillable() &&
         "Attempting to spill already spilled value.");

  // Splitting is preferred to spilling: nothing hits memory if it succeeds.
  if (split())
    return;

  reMaterializeAll();

  // Remat may handle everything.
  if (edit_->getParent().empty())
    return;

  rc_ = mri_.getRegClass(edit.getReg());
  stackSlot_ = edit.assignStackSlot(vrm_);

  // Update LiveStacks now that we are committed to spilling.
  // All ranges of the spilled interval are merged into the stack interval's
  // single value number.
  LiveInterval &stacklvr = lss_.getOrCreateInterval(stackSlot_, rc_);
  if (!stacklvr.hasAtLeastOneValue())
    stacklvr.getNextValue(SlotIndex(), 0, lss_.getVNInfoAllocator());
  stacklvr.MergeRangesInAsValue(edit_->getParent(), stacklvr.getValNumInfo(0));

  // Iterate over instructions using register.  skipInstruction() advances
  // past MI before we rewrite it, keeping the iteration safe.
  for (MachineRegisterInfo::reg_iterator RI = mri_.reg_begin(edit.getReg());
       MachineInstr *MI = RI.skipInstruction();) {

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      // NOTE(review): assumes operand 1 is the offset and operand 2 the
      // variable metadata of a DBG_VALUE — per the DBG_VALUE layout of this
      // LLVM revision.
      uint64_t Offset = MI->getOperand(1).getImm();
      const MDNode *MDPtr = MI->getOperand(2).getMetadata();
      DebugLoc DL = MI->getDebugLoc();
      if (MachineInstr *NewDV = tii_.emitFrameIndexDebugValue(mf_, stackSlot_,
                                                              Offset, MDPtr, DL)) {
        DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
        MachineBasicBlock *MBB = MI->getParent();
        MBB->insert(MBB->erase(MI), NewDV);
      } else {
        DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
        MI->eraseFromParent();
      }
      continue;
    }

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(MI))
      continue;

    // Analyze instruction.
    bool Reads, Writes;
    SmallVector<unsigned, 8> Ops;
    tie(Reads, Writes) = MI->readsWritesVirtualRegister(edit.getReg(), &Ops);

    // Attempt to fold memory ops.
    if (foldMemoryOperand(MI, Ops))
      continue;

    // Allocate interval around instruction.
    // FIXME: Infer regclass from instruction alone.
    LiveInterval &NewLI = edit.create(mri_, lis_, vrm_);
    NewLI.markNotSpillable();

    if (Reads)
      insertReload(NewLI, MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      MO.setReg(NewLI.reg);
      if (MO.isUse()) {
        // Untied uses die at this instruction; tied uses are kept alive by
        // their def.
        if (!MI->isRegTiedToDefOperand(Ops[i]))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (Writes && hasLiveDef)
      insertSpill(NewLI, MI);

    DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  }
}