// InlineSpiller.cpp revision 7536f72a97ad25c3652fdfe26d392fd78b6ea7b9
//===-------- InlineSpiller.cpp - Insert spills and restores inline -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "spiller"
#include "Spiller.h"
#include "SplitKit.h"
#include "VirtRegMap.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {
/// InlineSpiller - A Spiller implementation that rewrites the machine function
/// in place: it inserts reload/spill instructions (or folds them into memory
/// operands, or rematerializes the value) around every use of a spilled
/// virtual register, instead of recording the rewrites in VirtRegMap.
/// One instance services many spill() calls; the "current spill" state below
/// is reset on each call.
class InlineSpiller : public Spiller {
  // Function-level analyses and target hooks, cached at construction.
  MachineFunction &mf_;
  LiveIntervals &lis_;
  MachineLoopInfo &loops_;
  VirtRegMap &vrm_;
  MachineFrameInfo &mfi_;
  MachineRegisterInfo &mri_;
  const TargetInstrInfo &tii_;
  const TargetRegisterInfo &tri_;
  const BitVector reserved_;

  // Analysis used by split() to try splitting the interval around a loop
  // instead of spilling it.
  SplitAnalysis splitAnalysis_;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveInterval *li_;                               // Interval being spilled.
  std::vector<LiveInterval*> *newIntervals_;       // Output: intervals we create.
  const TargetRegisterClass *rc_;                  // Register class of li_->reg.
  int stackSlot_;                                  // Stack slot assigned to li_->reg.
  const SmallVectorImpl<LiveInterval*> *spillIs_;  // Other intervals being spilled
                                                   // in the same round; we must not
                                                   // depend on their registers.

  // Values of the current interval that can potentially remat.
  SmallPtrSet<VNInfo*, 8> reMattable_;

  // Values in reMattable_ that failed to remat at some point.
  SmallPtrSet<VNInfo*, 8> usedValues_;

  // Private dtor: instances are heap-allocated by createInlineSpiller() and
  // destroyed through the Spiller interface.
  ~InlineSpiller() {}

public:
  InlineSpiller(MachineFunctionPass &pass,
                MachineFunction &mf,
                VirtRegMap &vrm)
    : mf_(mf),
      lis_(pass.getAnalysis<LiveIntervals>()),
      loops_(pass.getAnalysis<MachineLoopInfo>()),
      vrm_(vrm),
      mfi_(*mf.getFrameInfo()),
      mri_(mf.getRegInfo()),
      tii_(*mf.getTarget().getInstrInfo()),
      tri_(*mf.getTarget().getRegisterInfo()),
      reserved_(tri_.getReservedRegs(mf_)),
      splitAnalysis_(mf, lis_, loops_) {}

  // Spiller interface: spill li, appending any intervals created for the
  // inserted reload/spill/remat code to newIntervals.
  void spill(LiveInterval *li,
             std::vector<LiveInterval*> &newIntervals,
             SmallVectorImpl<LiveInterval*> &spillIs,
             SlotIndex *earliestIndex);

private:
  bool split();

  bool allUsesAvailableAt(const MachineInstr *OrigMI, SlotIndex OrigIdx,
                          SlotIndex UseIdx);
  bool reMaterializeFor(MachineBasicBlock::iterator MI);
  void reMaterializeAll();

  bool foldMemoryOperand(MachineBasicBlock::iterator MI,
                         const SmallVectorImpl<unsigned> &Ops);
  void insertReload(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
  void insertSpill(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
};
}

namespace llvm {
/// createInlineSpiller - Factory function; the caller owns the returned
/// Spiller and releases it through the Spiller interface.
Spiller *createInlineSpiller(MachineFunctionPass &pass,
                             MachineFunction &mf,
                             VirtRegMap &vrm) {
  return new InlineSpiller(pass, mf, vrm);
}
}

/// split - try splitting the current interval into pieces that may allocate
/// separately. Return true if successful.
bool InlineSpiller::split() {
  // FIXME: Add intra-MBB splitting.
  if (lis_.intervalIsInOneMBB(*li_))
    return false;

  splitAnalysis_.analyze(li_);

  if (const MachineLoop *loop = splitAnalysis_.getBestSplitLoop()) {
    // Split li_ around the chosen loop; SplitEditor appends the resulting
    // intervals to *newIntervals_ for the caller to allocate.
    SplitEditor(splitAnalysis_, lis_, vrm_, *newIntervals_)
      .splitAroundLoop(loop);
    return true;
  }
  return false;
}

/// allUsesAvailableAt - Return true if all registers used by OrigMI at
/// OrigIdx are also available with the same value at UseIdx.
// Rematerializing OrigMI at UseIdx is only safe if every register OrigMI
// reads still holds the same value there; this walks OrigMI's operands and
// compares the value numbers at the two indexes.
bool InlineSpiller::allUsesAvailableAt(const MachineInstr *OrigMI,
                                       SlotIndex OrigIdx,
                                       SlotIndex UseIdx) {
  // Compare at the use slot of each instruction.
  OrigIdx = OrigIdx.getUseIndex();
  UseIdx = UseIdx.getUseIndex();
  for (unsigned i = 0, e = OrigMI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = OrigMI->getOperand(i);
    // Skip non-register operands and uses of the register being spilled
    // itself (that is the value we are rematerializing).
    if (!MO.isReg() || !MO.getReg() || MO.getReg() == li_->reg)
      continue;
    // Reserved registers are OK.
    if (MO.isUndef() || !lis_.hasInterval(MO.getReg()))
      continue;
    // We don't want to move any defs.
    if (MO.isDef())
      return false;
    // We cannot depend on virtual registers in spillIs_. They will be spilled.
    for (unsigned si = 0, se = spillIs_->size(); si != se; ++si)
      if ((*spillIs_)[si]->reg == MO.getReg())
        return false;

    LiveInterval &LI = lis_.getInterval(MO.getReg());
    const VNInfo *OVNI = LI.getVNInfoAt(OrigIdx);
    if (!OVNI)
      continue;
    // The operand must carry the identical value number at the new location.
    if (OVNI != LI.getVNInfoAt(UseIdx))
      return false;
  }
  return true;
}

/// reMaterializeFor - Attempt to rematerialize li_->reg before MI instead of
/// reloading it.
// Returns true if MI no longer needs a reload of li_->reg (either the use was
// dead and got <undef> flags, or a remat instruction was inserted before MI).
// Returns false when a reload will still be required; in that case the value
// is recorded in usedValues_ so its def is not deleted later.
bool InlineSpiller::reMaterializeFor(MachineBasicBlock::iterator MI) {
  SlotIndex UseIdx = lis_.getInstructionIndex(MI).getUseIndex();
  VNInfo *OrigVNI = li_->getVNInfoAt(UseIdx);
  if (!OrigVNI) {
    // li_ is not live here at all: the use reads an undefined value, so mark
    // the operands <undef> instead of reloading anything.
    DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() && MO.getReg() == li_->reg)
        MO.setIsUndef();
    }
    DEBUG(dbgs() << UseIdx << '\t' << *MI);
    return true;
  }
  if (!reMattable_.count(OrigVNI)) {
    DEBUG(dbgs() << "\tusing non-remat valno " << OrigVNI->id << ": "
                 << UseIdx << '\t' << *MI);
    return false;
  }
  MachineInstr *OrigMI = lis_.getInstructionFromIndex(OrigVNI->def);
  if (!allUsesAvailableAt(OrigMI, OrigVNI->def, UseIdx)) {
    usedValues_.insert(OrigVNI);
    DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
    return false;
  }

  // If the instruction also writes li_->reg, it had better not require the same
  // register for uses and defs.
  bool Reads, Writes;
  SmallVector<unsigned, 8> Ops;
  tie(Reads, Writes) = MI->readsWritesVirtualRegister(li_->reg, &Ops);
  if (Writes) {
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      // A tied use must match its def register, and a subreg def would only
      // partially overwrite the rematted value; both block rematerialization.
      if (MO.isUse() ? MI->isRegTiedToDefOperand(Ops[i]) : MO.getSubReg()) {
        usedValues_.insert(OrigVNI);
        DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
        return false;
      }
    }
  }

  // Allocate a new register for the remat.
  unsigned NewVReg = mri_.createVirtualRegister(rc_);
  vrm_.grow();
  LiveInterval &NewLI = lis_.getOrCreateInterval(NewVReg);
  NewLI.markNotSpillable();
  newIntervals_->push_back(&NewLI);

  // Finally we can rematerialize OrigMI before MI.
  MachineBasicBlock &MBB = *MI->getParent();
  tii_.reMaterialize(MBB, MI, NewLI.reg, 0, OrigMI, tri_);
  // reMaterialize inserted the copy immediately before MI, so back up one
  // instruction to index it.
  MachineBasicBlock::iterator RematMI = MI;
  SlotIndex DefIdx = lis_.InsertMachineInstrInMaps(--RematMI).getDefIndex();
  DEBUG(dbgs() << "\tremat: " << DefIdx << '\t' << *RematMI);

  // Replace operands
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(Ops[i]);
    if (MO.isReg() && MO.isUse() && MO.getReg() == li_->reg) {
      MO.setReg(NewVReg);
      // NewVReg is live only from the remat to this use.
      MO.setIsKill();
    }
  }
  DEBUG(dbgs() << "\t " << UseIdx << '\t' << *MI);

  // Give the new interval a single value live from the remat def to the use.
  VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, true,
                                      lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI));
  DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  return true;
}

/// reMaterializeAll - Try to rematerialize as many uses of li_ as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  // Do a quick scan of the interval values to find if any are remattable.
  reMattable_.clear();
  usedValues_.clear();
  for (LiveInterval::const_vni_iterator I = li_->vni_begin(),
       E = li_->vni_end(); I != E; ++I) {
    VNInfo *VNI = *I;
    // Skip values with no (or imprecise) defining instruction.
    if (VNI->isUnused() || !VNI->isDefAccurate())
      continue;
    MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
    if (!DefMI || !tii_.isTriviallyReMaterializable(DefMI))
      continue;
    reMattable_.insert(VNI);
  }

  // Often, no defs are remattable.
  if (reMattable_.empty())
    return;

  // Try to remat before all uses of li_->reg.
  // skipInstruction() advances past all operands of the current instruction,
  // so the loop is safe against the operand rewriting done by
  // reMaterializeFor.
  bool anyRemat = false;
  for (MachineRegisterInfo::use_nodbg_iterator
       RI = mri_.use_nodbg_begin(li_->reg);
       MachineInstr *MI = RI.skipInstruction();)
    anyRemat |= reMaterializeFor(MI);

  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  // A value still in usedValues_ (or live-out through a PHI) has remaining
  // uses and must keep its def.
  bool anyRemoved = false;
  for (SmallPtrSet<VNInfo*, 8>::iterator I = reMattable_.begin(),
       E = reMattable_.end(); I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->hasPHIKill() || usedValues_.count(VNI))
      continue;
    MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
    DEBUG(dbgs() << "\tremoving dead def: " << VNI->def << '\t' << *DefMI);
    lis_.RemoveMachineInstrFromMaps(DefMI);
    vrm_.RemoveMachineInstrFromMaps(DefMI);
    DefMI->eraseFromParent();
    li_->removeValNo(VNI);
    anyRemoved = true;
  }

  if (!anyRemoved)
    return;

  // Removing values may cause debug uses where li_ is not live.
  for (MachineRegisterInfo::use_iterator RI = mri_.use_begin(li_->reg);
       MachineInstr *MI = RI.skipInstruction();) {
    if (!MI->isDebugValue())
      continue;
    // Try to preserve the debug value if li_ is live immediately after it.
    MachineBasicBlock::iterator NextMI = MI;
    ++NextMI;
    if (NextMI != MI->getParent()->end() && !lis_.isNotInMIMap(NextMI)) {
      SlotIndex NearIdx = lis_.getInstructionIndex(NextMI);
      if (li_->liveAt(NearIdx))
        continue;
    }
    DEBUG(dbgs() << "Removing debug info due to remat:" << "\t" << *MI);
    MI->eraseFromParent();
  }
}

/// foldMemoryOperand - Try folding stack slot references in Ops into MI.
/// Return true on success, and MI will be erased.
bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                      const SmallVectorImpl<unsigned> &Ops) {
  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned Idx = Ops[i];
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit())
      continue;
    // FIXME: Teach targets to deal with subregs.
    if (MO.getSubReg())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  MachineInstr *FoldMI = tii_.foldMemoryOperand(MI, FoldOps, stackSlot_);
  if (!FoldMI)
    return false;
  // The folded instruction replaces MI in the index maps and records its
  // stack-slot use; the original is deleted.
  lis_.ReplaceMachineInstrInMaps(MI, FoldMI);
  vrm_.addSpillSlotUse(stackSlot_, FoldMI);
  MI->eraseFromParent();
  DEBUG(dbgs() << "\tfolded: " << *FoldMI);
  return true;
}

/// insertReload - Insert a reload of NewLI.reg before MI.
void InlineSpiller::insertReload(LiveInterval &NewLI,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  tii_.loadRegFromStackSlot(MBB, MI, NewLI.reg, stackSlot_, rc_, &tri_);
  --MI; // Point to load instruction.
  SlotIndex LoadIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\treload: " << LoadIdx << '\t' << *MI);
  // NewLI is live from the load's def until MI's def slot (i.e. covering
  // MI's use of the reloaded value).
  VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0, true,
                                       lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI));
}

/// insertSpill - Insert a spill of NewLI.reg after MI.
void InlineSpiller::insertSpill(LiveInterval &NewLI,
                                MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  // ++MI: the store goes after the defining instruction; then back up so MI
  // points at the store we just inserted.
  tii_.storeRegToStackSlot(MBB, ++MI, NewLI.reg, true, stackSlot_, rc_, &tri_);
  --MI; // Point to store instruction.
  SlotIndex StoreIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\tspilled: " << StoreIdx << '\t' << *MI);
  // NewLI is live from the original def until the store consumes it.
  VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, true,
                                        lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(Idx, StoreIdx, StoreVNI));
}

// Spill li by, in order of preference: splitting it around a loop,
// rematerializing its values, folding the stack slot into users, or inserting
// explicit reload/spill instructions around each user.  New short intervals
// are appended to newIntervals and marked not-spillable.
// NOTE(review): earliestIndex is accepted per the Spiller interface but not
// used by this implementation — confirm callers do not rely on it here.
void InlineSpiller::spill(LiveInterval *li,
                          std::vector<LiveInterval*> &newIntervals,
                          SmallVectorImpl<LiveInterval*> &spillIs,
                          SlotIndex *earliestIndex) {
  DEBUG(dbgs() << "Inline spilling " << *li << "\n");
  assert(li->isSpillable() && "Attempting to spill already spilled value.");
  assert(!li->isStackSlot() && "Trying to spill a stack slot.");

  // Stash the per-spill state used by the helper methods.
  li_ = li;
  newIntervals_ = &newIntervals;
  rc_ = mri_.getRegClass(li->reg);
  spillIs_ = &spillIs;

  if (split())
    return;

  reMaterializeAll();

  // Remat may handle everything.
  if (li_->empty())
    return;

  // Assign a stack slot lazily; remat/split may have avoided the need.
  stackSlot_ = vrm_.getStackSlot(li->reg);
  if (stackSlot_ == VirtRegMap::NO_STACK_SLOT)
    stackSlot_ = vrm_.assignVirt2StackSlot(li->reg);

  // Iterate over instructions using register.
  // skipInstruction() makes the loop safe against operand rewriting and
  // instruction erasure below.
  for (MachineRegisterInfo::reg_iterator RI = mri_.reg_begin(li->reg);
       MachineInstr *MI = RI.skipInstruction();) {

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      uint64_t Offset = MI->getOperand(1).getImm();
      const MDNode *MDPtr = MI->getOperand(2).getMetadata();
      DebugLoc DL = MI->getDebugLoc();
      if (MachineInstr *NewDV = tii_.emitFrameIndexDebugValue(mf_, stackSlot_,
                                                              Offset, MDPtr, DL)) {
        DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
        MachineBasicBlock *MBB = MI->getParent();
        MBB->insert(MBB->erase(MI), NewDV);
      } else {
        // The target cannot describe the slot; drop the debug info rather
        // than emit wrong locations.
        DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
        MI->eraseFromParent();
      }
      continue;
    }

    // Analyze instruction.
    bool Reads, Writes;
    SmallVector<unsigned, 8> Ops;
    tie(Reads, Writes) = MI->readsWritesVirtualRegister(li->reg, &Ops);

    // Attempt to fold memory ops.
    if (foldMemoryOperand(MI, Ops))
      continue;

    // Allocate interval around instruction.
    // FIXME: Infer regclass from instruction alone.
    unsigned NewVReg = mri_.createVirtualRegister(rc_);
    vrm_.grow();
    LiveInterval &NewLI = lis_.getOrCreateInterval(NewVReg);
    NewLI.markNotSpillable();

    if (Reads)
      insertReload(NewLI, MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      MO.setReg(NewVReg);
      if (MO.isUse()) {
        // Untied uses die here; tied uses are kept alive by the def.
        if (!MI->isRegTiedToDefOperand(Ops[i]))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (Writes && hasLiveDef)
      insertSpill(NewLI, MI);

    DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
    newIntervals.push_back(&NewLI);
  }
}