Spiller.cpp revision 0a2b2a1497a77f1db281d4dc9f21d01fe48f6ec6
//===-- llvm/CodeGen/Spiller.cpp - Spiller -------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "spiller"

#include "Spiller.h"
#include "VirtRegMap.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <set>

using namespace llvm;

namespace {
  enum SpillerName { trivial, standard, splitting, inline_ };
}

static cl::opt<SpillerName>
spillerOpt("spiller",
           cl::desc("Spiller to use: (default: standard)"),
           cl::Prefix,
           cl::values(clEnumVal(trivial,   "trivial spiller"),
                      clEnumVal(standard,  "default spiller"),
                      clEnumVal(splitting, "splitting spiller"),
                      clEnumValN(inline_,  "inline", "inline spiller"),
                      clEnumValEnd),
           cl::init(standard));

// Spiller virtual destructor implementation.
Spiller::~Spiller() {}

namespace {

/// Utility class for spillers.
class SpillerBase : public Spiller {
protected:
  MachineFunctionPass *pass;
  MachineFunction *mf;
  VirtRegMap *vrm;
  LiveIntervals *lis;
  MachineFrameInfo *mfi;
  MachineRegisterInfo *mri;
  const TargetInstrInfo *tii;
  const TargetRegisterInfo *tri;

  /// Construct a spiller base.
  SpillerBase(MachineFunctionPass &pass, MachineFunction &mf, VirtRegMap &vrm)
    : pass(&pass), mf(&mf), vrm(&vrm)
  {
    lis = &pass.getAnalysis<LiveIntervals>();
    mfi = mf.getFrameInfo();
    mri = &mf.getRegInfo();
    tii = mf.getTarget().getInstrInfo();
    tri = mf.getTarget().getRegisterInfo();
  }

  /// Add spill ranges for every use/def of the live interval, inserting loads
  /// immediately before each use, and stores after each def. No folding or
  /// remat is attempted.
  void trivialSpillEverywhere(LiveInterval *li,
                              SmallVectorImpl<LiveInterval*> &newIntervals) {
    DEBUG(dbgs() << "Spilling everywhere " << *li << "\n");

    assert(li->weight != HUGE_VALF &&
           "Attempting to spill already spilled value.");

    assert(!li->isStackSlot() &&
           "Trying to spill a stack slot.");

    DEBUG(dbgs() << "Trivial spill everywhere of reg" << li->reg << "\n");

    const TargetRegisterClass *trc = mri->getRegClass(li->reg);
    unsigned ss = vrm->assignVirt2StackSlot(li->reg);

    // Iterate over reg uses/defs.
    for (MachineRegisterInfo::reg_iterator
         regItr = mri->reg_begin(li->reg); regItr != mri->reg_end();) {

      // Grab the use/def instr.
      MachineInstr *mi = &*regItr;

      DEBUG(dbgs() << "  Processing " << *mi);

      // Step regItr to the next use/def instr.
      do {
        ++regItr;
      } while (regItr != mri->reg_end() && (&*regItr == mi));

      // Collect uses & defs for this instr.
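      // (The operand indices are remembered so the same operands can be
      // rewritten to the new vreg below.)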
      SmallVector<unsigned, 2> indices;
      bool hasUse = false;
      bool hasDef = false;
      for (unsigned i = 0; i != mi->getNumOperands(); ++i) {
        MachineOperand &op = mi->getOperand(i);
        if (!op.isReg() || op.getReg() != li->reg)
          continue;
        hasUse |= mi->getOperand(i).isUse();
        hasDef |= mi->getOperand(i).isDef();
        indices.push_back(i);
      }

      // Create a new vreg & interval for this instr.
      unsigned newVReg = mri->createVirtualRegister(trc);
      vrm->grow();
      vrm->assignVirt2StackSlot(newVReg, ss);
      LiveInterval *newLI = &lis->getOrCreateInterval(newVReg);
      newLI->weight = HUGE_VALF;

      // Update the reg operands & kill flags.
      for (unsigned i = 0; i < indices.size(); ++i) {
        unsigned mopIdx = indices[i];
        MachineOperand &mop = mi->getOperand(mopIdx);
        mop.setReg(newVReg);
        if (mop.isUse() && !mi->isRegTiedToDefOperand(mopIdx)) {
          mop.setIsKill(true);
        }
      }
      assert(hasUse || hasDef);

      // Insert reload if necessary.
      MachineBasicBlock::iterator miItr(mi);
      if (hasUse) {
        tii->loadRegFromStackSlot(*mi->getParent(), miItr, newVReg, ss, trc,
                                  tri);
        MachineInstr *loadInstr(prior(miItr));
        SlotIndex loadIndex =
          lis->InsertMachineInstrInMaps(loadInstr).getDefIndex();
        vrm->addSpillSlotUse(ss, loadInstr);
        SlotIndex endIndex = loadIndex.getNextIndex();
        VNInfo *loadVNI =
          newLI->getNextValue(loadIndex, 0, true, lis->getVNInfoAllocator());
        newLI->addRange(LiveRange(loadIndex, endIndex, loadVNI));
      }

      // Insert store if necessary.
      if (hasDef) {
        tii->storeRegToStackSlot(*mi->getParent(), llvm::next(miItr), newVReg,
                                 true, ss, trc, tri);
        MachineInstr *storeInstr(llvm::next(miItr));
        SlotIndex storeIndex =
          lis->InsertMachineInstrInMaps(storeInstr).getDefIndex();
        vrm->addSpillSlotUse(ss, storeInstr);
        SlotIndex beginIndex = storeIndex.getPrevIndex();
        VNInfo *storeVNI =
          newLI->getNextValue(beginIndex, 0, true, lis->getVNInfoAllocator());
        newLI->addRange(LiveRange(beginIndex, storeIndex, storeVNI));
      }

      newIntervals.push_back(newLI);
    }
  }
};

} // end anonymous namespace

namespace {

/// Spills any live range using the spill-everywhere method with no attempt at
/// folding.
class TrivialSpiller : public SpillerBase {
public:

  TrivialSpiller(MachineFunctionPass &pass, MachineFunction &mf,
                 VirtRegMap &vrm)
    : SpillerBase(pass, mf, vrm) {}

  void spill(LiveInterval *li,
             SmallVectorImpl<LiveInterval*> &newIntervals,
             SmallVectorImpl<LiveInterval*> &) {
    // Ignore spillIs - we don't use it.
    trivialSpillEverywhere(li, newIntervals);
  }
};

} // end anonymous namespace

namespace {

/// Falls back on LiveIntervals::addIntervalsForSpills.
class StandardSpiller : public Spiller {
protected:
  LiveIntervals *lis;
  MachineLoopInfo *loopInfo;
  VirtRegMap *vrm;
public:
  StandardSpiller(MachineFunctionPass &pass, MachineFunction &mf,
                  VirtRegMap &vrm)
    : lis(&pass.getAnalysis<LiveIntervals>()),
      loopInfo(pass.getAnalysisIfAvailable<MachineLoopInfo>()),
      vrm(&vrm) {}

  /// Falls back on LiveIntervals::addIntervalsForSpills.
  void spill(LiveInterval *li,
             SmallVectorImpl<LiveInterval*> &newIntervals,
             SmallVectorImpl<LiveInterval*> &spillIs) {
    std::vector<LiveInterval*> added =
      lis->addIntervalsForSpills(*li, spillIs, loopInfo, *vrm);
    newIntervals.insert(newIntervals.end(), added.begin(), added.end());
  }
};

} // end anonymous namespace

namespace {

/// When a call to spill is placed, this spiller will first try to break the
/// interval up into its component values (one new interval per value).
/// If this fails, or if a call is placed to spill a previously split
/// interval, the spiller falls back on the standard spilling mechanism.
class SplittingSpiller : public StandardSpiller {
public:
  SplittingSpiller(MachineFunctionPass &pass, MachineFunction &mf,
                   VirtRegMap &vrm)
    : StandardSpiller(pass, mf, vrm) {
    mri = &mf.getRegInfo();
    tii = mf.getTarget().getInstrInfo();
    tri = mf.getTarget().getRegisterInfo();
  }

  void spill(LiveInterval *li,
             SmallVectorImpl<LiveInterval*> &newIntervals,
             SmallVectorImpl<LiveInterval*> &spillIs) {
    if (worthTryingToSplit(li))
      tryVNISplit(li);
    else
      StandardSpiller::spill(li, newIntervals, spillIs);
  }

private:

  MachineRegisterInfo *mri;
  const TargetInstrInfo *tii;
  const TargetRegisterInfo *tri;
  DenseSet<LiveInterval*> alreadySplit;

  bool worthTryingToSplit(LiveInterval *li) const {
    return (!alreadySplit.count(li) && li->getNumValNums() > 1);
  }

  /// Try to break a LiveInterval into its component values.
  std::vector<LiveInterval*> tryVNISplit(LiveInterval *li) {

    DEBUG(dbgs() << "Trying VNI split of %reg" << *li << "\n");

    std::vector<LiveInterval*> added;
    SmallVector<VNInfo*, 4> vnis;

    std::copy(li->vni_begin(), li->vni_end(), std::back_inserter(vnis));

    for (SmallVectorImpl<VNInfo*>::iterator vniItr = vnis.begin(),
         vniEnd = vnis.end(); vniItr != vniEnd; ++vniItr) {
      VNInfo *vni = *vniItr;

      // Skip unused VNIs.
      if (vni->isUnused())
        continue;

      DEBUG(dbgs() << "  Extracted Val #" << vni->id << " as ");
      LiveInterval *splitInterval = extractVNI(li, vni);

      if (splitInterval != 0) {
        DEBUG(dbgs() << *splitInterval << "\n");
        added.push_back(splitInterval);
        alreadySplit.insert(splitInterval);
      } else {
        DEBUG(dbgs() << "0\n");
      }
    }

    DEBUG(dbgs() << "Original LI: " << *li << "\n");

    // If the original interval still contains some live ranges, add it to
    // added and alreadySplit.
    if (!li->empty()) {
      added.push_back(li);
      alreadySplit.insert(li);
    }

    return added;
  }

  /// Extract the given value number from the interval.
  LiveInterval* extractVNI(LiveInterval *li, VNInfo *vni) const {
    assert(vni->isDefAccurate() || vni->isPHIDef());

    // Create a new vreg and live interval, copy VNI ranges over.
    const TargetRegisterClass *trc = mri->getRegClass(li->reg);
    unsigned newVReg = mri->createVirtualRegister(trc);
    vrm->grow();
    LiveInterval *newLI = &lis->getOrCreateInterval(newVReg);
    VNInfo *newVNI = newLI->createValueCopy(vni, lis->getVNInfoAllocator());

    // Start by copying all live ranges in the VN to the new interval.
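    // The copies are taken before removeValNo() strips the value from the
    // original interval below, so none of its range information is lost.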
    for (LiveInterval::iterator rItr = li->begin(), rEnd = li->end();
         rItr != rEnd; ++rItr) {
      if (rItr->valno == vni) {
        newLI->addRange(LiveRange(rItr->start, rItr->end, newVNI));
      }
    }

    // Erase the old VNI & ranges.
    li->removeValNo(vni);

    // Collect all current uses of the register belonging to the given VNI.
    // We'll use this to rename the register after we've dealt with the def.
    std::set<MachineInstr*> uses;
    for (MachineRegisterInfo::use_iterator
         useItr = mri->use_begin(li->reg), useEnd = mri->use_end();
         useItr != useEnd; ++useItr) {
      uses.insert(&*useItr);
    }

    // Process the def instruction for this VNI.
    if (newVNI->isPHIDef()) {
      // Insert a copy at the start of the MBB. The range preceding the
      // copy will be attached to the original LiveInterval.
      MachineBasicBlock *defMBB = lis->getMBBFromIndex(newVNI->def);
      MachineInstr *copyMI = BuildMI(*defMBB, defMBB->begin(), DebugLoc(),
                                     tii->get(TargetOpcode::COPY), newVReg)
                               .addReg(li->reg, RegState::Kill);
      SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
      VNInfo *phiDefVNI = li->getNextValue(lis->getMBBStartIdx(defMBB),
                                           0, false, lis->getVNInfoAllocator());
      phiDefVNI->setIsPHIDef(true);
      li->addRange(LiveRange(phiDefVNI->def, copyIdx.getDefIndex(), phiDefVNI));
      LiveRange *oldPHIDefRange =
        newLI->getLiveRangeContaining(lis->getMBBStartIdx(defMBB));

      // If the old phi def starts in the middle of the range, chop it up.
      if (oldPHIDefRange->start < lis->getMBBStartIdx(defMBB)) {
        LiveRange oldPHIDefRange2(copyIdx.getDefIndex(), oldPHIDefRange->end,
                                  oldPHIDefRange->valno);
        oldPHIDefRange->end = lis->getMBBStartIdx(defMBB);
        newLI->addRange(oldPHIDefRange2);
      } else if (oldPHIDefRange->start == lis->getMBBStartIdx(defMBB)) {
        // Otherwise if it's at the start of the range just trim it.
        oldPHIDefRange->start = copyIdx.getDefIndex();
      } else {
        assert(false && "PHI def range doesn't cover PHI def?");
      }

      newVNI->def = copyIdx.getDefIndex();
      newVNI->setCopy(copyMI);
      newVNI->setIsPHIDef(false); // not a PHI def anymore.
      newVNI->setIsDefAccurate(true);
    } else {
      // Non-PHI def. Rename the def. If it's two-addr that means renaming the
      // use and inserting a new copy too.
      MachineInstr *defInst = lis->getInstructionFromIndex(newVNI->def);
      // We'll rename this now, so we can remove it from uses.
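      // (The def instruction's operands are rewritten immediately below, so
      // it must not be visited again by the use-renaming loop that follows.)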
      uses.erase(defInst);
      unsigned defOpIdx = defInst->findRegisterDefOperandIdx(li->reg);
      bool isTwoAddr = defInst->isRegTiedToUseOperand(defOpIdx),
           twoAddrUseIsUndef = false;

      for (unsigned i = 0; i < defInst->getNumOperands(); ++i) {
        MachineOperand &mo = defInst->getOperand(i);
        if (mo.isReg() && (mo.isDef() || isTwoAddr) && (mo.getReg()==li->reg)) {
          mo.setReg(newVReg);
          if (isTwoAddr && mo.isUse() && mo.isUndef())
            twoAddrUseIsUndef = true;
        }
      }

      SlotIndex defIdx = lis->getInstructionIndex(defInst);
      newVNI->def = defIdx.getDefIndex();

      if (isTwoAddr && !twoAddrUseIsUndef) {
        MachineBasicBlock *defMBB = defInst->getParent();
        MachineInstr *copyMI = BuildMI(*defMBB, defInst, DebugLoc(),
                                       tii->get(TargetOpcode::COPY), newVReg)
                                 .addReg(li->reg, RegState::Kill);
        SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
        LiveRange *origUseRange =
          li->getLiveRangeContaining(newVNI->def.getUseIndex());
        origUseRange->end = copyIdx.getDefIndex();
        VNInfo *copyVNI = newLI->getNextValue(copyIdx.getDefIndex(), copyMI,
                                              true, lis->getVNInfoAllocator());
        LiveRange copyRange(copyIdx.getDefIndex(),defIdx.getDefIndex(),copyVNI);
        newLI->addRange(copyRange);
      }
    }

    for (std::set<MachineInstr*>::iterator
         usesItr = uses.begin(), usesEnd = uses.end();
         usesItr != usesEnd; ++usesItr) {
      MachineInstr *useInst = *usesItr;
      SlotIndex useIdx = lis->getInstructionIndex(useInst);
      LiveRange *useRange =
        newLI->getLiveRangeContaining(useIdx.getUseIndex());

      // If this use doesn't belong to the new interval skip it.
      if (useRange == 0)
        continue;

      // This use doesn't belong to the VNI, skip it.
      if (useRange->valno != newVNI)
        continue;

      // Check if this instr is two address.
      unsigned useOpIdx = useInst->findRegisterUseOperandIdx(li->reg);
      bool isTwoAddress = useInst->isRegTiedToDefOperand(useOpIdx);

      // Rename uses (and defs for two-address instrs).
      for (unsigned i = 0; i < useInst->getNumOperands(); ++i) {
        MachineOperand &mo = useInst->getOperand(i);
        if (mo.isReg() && (mo.isUse() || isTwoAddress) &&
            (mo.getReg() == li->reg)) {
          mo.setReg(newVReg);
        }
      }

      // If this is a two address instruction we've got some extra work to do.
      if (isTwoAddress) {
        // We modified the def operand, so we need to copy back to the original
        // reg.
        MachineBasicBlock *useMBB = useInst->getParent();
        MachineBasicBlock::iterator useItr(useInst);
        MachineInstr *copyMI = BuildMI(*useMBB, llvm::next(useItr), DebugLoc(),
                                       tii->get(TargetOpcode::COPY), newVReg)
                                 .addReg(li->reg, RegState::Kill);
        SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);

        // Change the old two-address defined range & vni to start at
        // (and be defined by) the copy.
        LiveRange *origDefRange =
          li->getLiveRangeContaining(useIdx.getDefIndex());
        origDefRange->start = copyIdx.getDefIndex();
        origDefRange->valno->def = copyIdx.getDefIndex();
        origDefRange->valno->setCopy(copyMI);

        // Insert a new range & vni for the two-address-to-copy value. This
        // will be attached to the new live interval.
        VNInfo *copyVNI =
          newLI->getNextValue(useIdx.getDefIndex(), 0, true,
                              lis->getVNInfoAllocator());
        LiveRange copyRange(useIdx.getDefIndex(),copyIdx.getDefIndex(),copyVNI);
        newLI->addRange(copyRange);
      }
    }

    // Iterate over any PHI kills - we'll need to insert new copies for them.
    for (LiveInterval::iterator LRI = newLI->begin(), LRE = newLI->end();
         LRI != LRE; ++LRI) {
      if (LRI->valno != newVNI || LRI->end.isPHI())
        continue;
      SlotIndex killIdx = LRI->end;
      MachineBasicBlock *killMBB = lis->getMBBFromIndex(killIdx);
      MachineInstr *copyMI = BuildMI(*killMBB, killMBB->getFirstTerminator(),
                                     DebugLoc(), tii->get(TargetOpcode::COPY),
                                     li->reg)
                               .addReg(newVReg, RegState::Kill);
      SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);

      // Save the current end. We may need it to add a new range if the
      // current range runs off the end of the MBB.
      SlotIndex newKillRangeEnd = LRI->end;
      LRI->end = copyIdx.getDefIndex();

      if (newKillRangeEnd != lis->getMBBEndIdx(killMBB)) {
        assert(newKillRangeEnd > lis->getMBBEndIdx(killMBB) &&
               "PHI kill range doesn't reach kill-block end. Not sane.");
        newLI->addRange(LiveRange(lis->getMBBEndIdx(killMBB),
                                  newKillRangeEnd, newVNI));
      }

      VNInfo *newKillVNI = li->getNextValue(copyIdx.getDefIndex(),
                                            copyMI, true,
                                            lis->getVNInfoAllocator());
      newKillVNI->setHasPHIKill(true);
      li->addRange(LiveRange(copyIdx.getDefIndex(),
                             lis->getMBBEndIdx(killMBB),
                             newKillVNI));
    }
    newVNI->setHasPHIKill(false);

    return newLI;
  }

};

} // end anonymous namespace


namespace llvm {
Spiller *createInlineSpiller(MachineFunctionPass &pass,
                             MachineFunction &mf,
                             VirtRegMap &vrm);
}

llvm::Spiller* llvm::createSpiller(MachineFunctionPass &pass,
                                   MachineFunction &mf,
                                   VirtRegMap &vrm) {
  switch (spillerOpt) {
  default: assert(0 && "unknown spiller");
  case trivial: return new TrivialSpiller(pass, mf, vrm);
  case standard: return new StandardSpiller(pass, mf, vrm);
  case splitting: return new SplittingSpiller(pass, mf, vrm);
  case inline_: return createInlineSpiller(pass, mf, vrm);
  }
}
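
// Minimal usage sketch (illustrative only; the variable names 'cur', 'added'
// and 'spillIs' below are assumptions, not part of this file). A register
// allocator pass that owns a MachineFunction 'mf' and a VirtRegMap 'vrm'
// would typically drive this interface as follows:
//
//   Spiller *spiller = createSpiller(*this, *mf, *vrm);
//   SmallVector<LiveInterval*, 8> added;
//   spiller->spill(cur, added, spillIs); // 'cur' is the interval to spill;
//                                        // 'added' receives the replacement
//                                        // intervals to re-queue.
//   ...
//   delete spiller;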