Spiller.cpp revision 47a1c339ca204a78f86153b814ed82adb46ec4c1
//===-- llvm/CodeGen/Spiller.cpp - Spiller -------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "spiller"

#include "Spiller.h"
#include "VirtRegMap.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <set>

using namespace llvm;

namespace {
  // Values for the -spiller=<name> command line option (see spillerOpt below).
  enum SpillerName { trivial, standard, splitting, inline_ };
}

// Command line selection of the spiller implementation returned by
// llvm::createSpiller. Defaults to the standard spiller.
static cl::opt<SpillerName>
spillerOpt("spiller",
           cl::desc("Spiller to use: (default: standard)"),
           cl::Prefix,
           cl::values(clEnumVal(trivial,   "trivial spiller"),
                      clEnumVal(standard,  "default spiller"),
                      clEnumVal(splitting, "splitting spiller"),
                      clEnumValN(inline_,  "inline", "inline spiller"),
                      clEnumValEnd),
           cl::init(standard));

// Spiller virtual destructor implementation.
Spiller::~Spiller() {}

namespace {

/// Utility class for spillers. Caches the function/analysis objects that
/// every concrete spiller needs and provides the shared spill-everywhere
/// implementation.
class SpillerBase : public Spiller {
protected:
  MachineFunctionPass *pass; // Pass requesting the spill (analysis source).
  MachineFunction *mf;       // Function being compiled.
  VirtRegMap *vrm;           // Virtual register -> stack slot mapping.
  LiveIntervals *lis;        // Live interval analysis for mf.
  MachineFrameInfo *mfi;     // Frame info (stack slots) for mf.
  MachineRegisterInfo *mri;  // Register use/def chains for mf.
  const TargetInstrInfo *tii;    // Target hooks for load/store insertion.
  const TargetRegisterInfo *tri; // Target register information.

  /// Construct a spiller base. Caches the analyses and target objects
  /// derived from the given pass and function.
  SpillerBase(MachineFunctionPass &pass, MachineFunction &mf, VirtRegMap &vrm)
    : pass(&pass), mf(&mf), vrm(&vrm)
  {
    lis = &pass.getAnalysis<LiveIntervals>();
    mfi = mf.getFrameInfo();
    mri = &mf.getRegInfo();
    tii = mf.getTarget().getInstrInfo();
    tri = mf.getTarget().getRegisterInfo();
  }

  /// Add spill ranges for every use/def of the live interval, inserting loads
  /// immediately before each use, and stores after each def. No folding or
  /// remat is attempted. Each touching instruction gets its own new vreg
  /// (mapped to the same stack slot) with a tiny live interval marked with
  /// weight HUGE_VALF so it is never spilled again. All new intervals are
  /// appended to newIntervals.
  void trivialSpillEverywhere(LiveInterval *li,
                              SmallVectorImpl<LiveInterval*> &newIntervals) {
    DEBUG(dbgs() << "Spilling everywhere " << *li << "\n");

    // HUGE_VALF weight is the marker this function stamps on already-spilled
    // intervals below; spilling one twice would be a client error.
    assert(li->weight != HUGE_VALF &&
           "Attempting to spill already spilled value.");

    assert(!li->isStackSlot() &&
           "Trying to spill a stack slot.");

    DEBUG(dbgs() << "Trivial spill everywhere of reg" << li->reg << "\n");

    const TargetRegisterClass *trc = mri->getRegClass(li->reg);
    unsigned ss = vrm->assignVirt2StackSlot(li->reg);

    // Iterate over reg uses/defs.
    for (MachineRegisterInfo::reg_iterator
         regItr = mri->reg_begin(li->reg); regItr != mri->reg_end();) {

      // Grab the use/def instr.
      MachineInstr *mi = &*regItr;

      DEBUG(dbgs() << "  Processing " << *mi);

      // Step regItr to the next use/def instr. The iterator visits operands,
      // so several consecutive entries may point at the same instruction;
      // skip them all now, before mi's operands are rewritten below.
      do {
        ++regItr;
      } while (regItr != mri->reg_end() && (&*regItr == mi));

      // Collect uses & defs for this instr - operand indices touching li->reg
      // and whether any of them reads (hasUse) or writes (hasDef) it.
      SmallVector<unsigned, 2> indices;
      bool hasUse = false;
      bool hasDef = false;
      for (unsigned i = 0; i != mi->getNumOperands(); ++i) {
        MachineOperand &op = mi->getOperand(i);
        if (!op.isReg() || op.getReg() != li->reg)
          continue;
        // Note: mi->getOperand(i) is the same operand as op here.
        hasUse |= mi->getOperand(i).isUse();
        hasDef |= mi->getOperand(i).isDef();
        indices.push_back(i);
      }

      // Create a new vreg & interval for this instr. It shares the original
      // register's stack slot and is marked un-spillable (HUGE_VALF).
      unsigned newVReg = mri->createVirtualRegister(trc);
      vrm->grow();
      vrm->assignVirt2StackSlot(newVReg, ss);
      LiveInterval *newLI = &lis->getOrCreateInterval(newVReg);
      newLI->weight = HUGE_VALF;

      // Update the reg operands & kill flags. Non-tied uses are killed here
      // since the new vreg is only live around this one instruction.
      for (unsigned i = 0; i < indices.size(); ++i) {
        unsigned mopIdx = indices[i];
        MachineOperand &mop = mi->getOperand(mopIdx);
        mop.setReg(newVReg);
        if (mop.isUse() && !mi->isRegTiedToDefOperand(mopIdx)) {
          mop.setIsKill(true);
        }
      }
      assert(hasUse || hasDef);

      // Insert reload if necessary. The load is placed immediately before mi
      // (prior(miItr) after the target hook inserts it) and the new interval
      // gets a one-slot range covering the load's def.
      MachineBasicBlock::iterator miItr(mi);
      if (hasUse) {
        tii->loadRegFromStackSlot(*mi->getParent(), miItr, newVReg, ss, trc,
                                  tri);
        MachineInstr *loadInstr(prior(miItr));
        SlotIndex loadIndex =
          lis->InsertMachineInstrInMaps(loadInstr).getDefIndex();
        vrm->addSpillSlotUse(ss, loadInstr);
        SlotIndex endIndex = loadIndex.getNextIndex();
        VNInfo *loadVNI =
          newLI->getNextValue(loadIndex, 0, lis->getVNInfoAllocator());
        newLI->addRange(LiveRange(loadIndex, endIndex, loadVNI));
      }

      // Insert store if necessary. The store goes immediately after mi, and
      // the new interval is extended back from the store to cover mi's def.
      if (hasDef) {
        tii->storeRegToStackSlot(*mi->getParent(), llvm::next(miItr), newVReg,
                                 true, ss, trc, tri);
        MachineInstr *storeInstr(llvm::next(miItr));
        SlotIndex storeIndex =
          lis->InsertMachineInstrInMaps(storeInstr).getDefIndex();
        vrm->addSpillSlotUse(ss, storeInstr);
        SlotIndex beginIndex = storeIndex.getPrevIndex();
        VNInfo *storeVNI =
          newLI->getNextValue(beginIndex, 0, lis->getVNInfoAllocator());
        newLI->addRange(LiveRange(beginIndex, storeIndex, storeVNI));
      }

      newIntervals.push_back(newLI);
    }
  }
};

} // end anonymous namespace

namespace {

/// Spills any live range using the spill-everywhere method with no attempt at
/// folding.
176class TrivialSpiller : public SpillerBase { 177public: 178 179 TrivialSpiller(MachineFunctionPass &pass, MachineFunction &mf, 180 VirtRegMap &vrm) 181 : SpillerBase(pass, mf, vrm) {} 182 183 void spill(LiveInterval *li, 184 SmallVectorImpl<LiveInterval*> &newIntervals, 185 SmallVectorImpl<LiveInterval*> &) { 186 // Ignore spillIs - we don't use it. 187 trivialSpillEverywhere(li, newIntervals); 188 } 189}; 190 191} // end anonymous namespace 192 193namespace { 194 195/// Falls back on LiveIntervals::addIntervalsForSpills. 196class StandardSpiller : public Spiller { 197protected: 198 LiveIntervals *lis; 199 MachineLoopInfo *loopInfo; 200 VirtRegMap *vrm; 201public: 202 StandardSpiller(MachineFunctionPass &pass, MachineFunction &mf, 203 VirtRegMap &vrm) 204 : lis(&pass.getAnalysis<LiveIntervals>()), 205 loopInfo(pass.getAnalysisIfAvailable<MachineLoopInfo>()), 206 vrm(&vrm) {} 207 208 /// Falls back on LiveIntervals::addIntervalsForSpills. 209 void spill(LiveInterval *li, 210 SmallVectorImpl<LiveInterval*> &newIntervals, 211 SmallVectorImpl<LiveInterval*> &spillIs) { 212 std::vector<LiveInterval*> added = 213 lis->addIntervalsForSpills(*li, spillIs, loopInfo, *vrm); 214 newIntervals.insert(newIntervals.end(), added.begin(), added.end()); 215 } 216}; 217 218} // end anonymous namespace 219 220namespace { 221 222/// When a call to spill is placed this spiller will first try to break the 223/// interval up into its component values (one new interval per value). 224/// If this fails, or if a call is placed to spill a previously split interval 225/// then the spiller falls back on the standard spilling mechanism. 
class SplittingSpiller : public StandardSpiller {
public:
  SplittingSpiller(MachineFunctionPass &pass, MachineFunction &mf,
                   VirtRegMap &vrm)
    : StandardSpiller(pass, mf, vrm) {
    mri = &mf.getRegInfo();
    tii = mf.getTarget().getInstrInfo();
    tri = mf.getTarget().getRegisterInfo();
  }

  /// Splits the interval per value number when worthwhile, otherwise defers
  /// to StandardSpiller::spill.
  /// NOTE(review): on the split path the intervals returned by tryVNISplit
  /// are not appended to newIntervals - presumably callers recover them via
  /// the updated LiveIntervals analysis; confirm against the register
  /// allocator's expectations.
  void spill(LiveInterval *li,
             SmallVectorImpl<LiveInterval*> &newIntervals,
             SmallVectorImpl<LiveInterval*> &spillIs) {
    if (worthTryingToSplit(li))
      tryVNISplit(li);
    else
      StandardSpiller::spill(li, newIntervals, spillIs);
  }

private:

  MachineRegisterInfo *mri;
  const TargetInstrInfo *tii;
  const TargetRegisterInfo *tri;
  // Intervals produced by a previous split; never split these again.
  DenseSet<LiveInterval*> alreadySplit;

  // Splitting only makes sense for intervals with more than one value that
  // we have not already split.
  bool worthTryingToSplit(LiveInterval *li) const {
    return (!alreadySplit.count(li) && li->getNumValNums() > 1);
  }

  /// Try to break a LiveInterval into its component values. Each extracted
  /// value becomes its own interval (and is recorded in alreadySplit); if the
  /// original interval still has live ranges afterwards it is returned too.
  std::vector<LiveInterval*> tryVNISplit(LiveInterval *li) {

    DEBUG(dbgs() << "Trying VNI split of %reg" << *li << "\n");

    std::vector<LiveInterval*> added;
    SmallVector<VNInfo*, 4> vnis;

    // Snapshot the value numbers first: extractVNI mutates li's VNI list.
    std::copy(li->vni_begin(), li->vni_end(), std::back_inserter(vnis));

    for (SmallVectorImpl<VNInfo*>::iterator vniItr = vnis.begin(),
         vniEnd = vnis.end(); vniItr != vniEnd; ++vniItr) {
      VNInfo *vni = *vniItr;

      // Skip unused VNIs.
      if (vni->isUnused())
        continue;

      DEBUG(dbgs() << "  Extracted Val #" << vni->id << " as ");
      LiveInterval *splitInterval = extractVNI(li, vni);

      if (splitInterval != 0) {
        DEBUG(dbgs() << *splitInterval << "\n");
        added.push_back(splitInterval);
        alreadySplit.insert(splitInterval);
      } else {
        DEBUG(dbgs() << "0\n");
      }
    }

    DEBUG(dbgs() << "Original LI: " << *li << "\n");

    // If the original interval still contains some live ranges, add it to
    // added and alreadySplit so it is not split a second time.
    if (!li->empty()) {
      added.push_back(li);
      alreadySplit.insert(li);
    }

    return added;
  }

  /// Extract the given value number from the interval: move its live ranges
  /// to a freshly created interval/vreg, rewrite the def and all uses of that
  /// value to the new vreg, and stitch the two intervals together with COPY
  /// instructions at the PHI-def entry and at every PHI kill / two-address
  /// boundary. Returns the new interval.
  LiveInterval* extractVNI(LiveInterval *li, VNInfo *vni) const {
    assert((lis->getInstructionFromIndex(vni->def) != 0 || vni->isPHIDef()) &&
           "Def index not sane?");

    // Create a new vreg and live interval, copy VNI ranges over.
    const TargetRegisterClass *trc = mri->getRegClass(li->reg);
    unsigned newVReg = mri->createVirtualRegister(trc);
    vrm->grow();
    LiveInterval *newLI = &lis->getOrCreateInterval(newVReg);
    VNInfo *newVNI = newLI->createValueCopy(vni, lis->getVNInfoAllocator());

    // Start by copying all live ranges in the VN to the new interval.
    for (LiveInterval::iterator rItr = li->begin(), rEnd = li->end();
         rItr != rEnd; ++rItr) {
      if (rItr->valno == vni) {
        newLI->addRange(LiveRange(rItr->start, rItr->end, newVNI));
      }
    }

    // Erase the old VNI & ranges.
    li->removeValNo(vni);

    // Collect all current uses of the register belonging to the given VNI.
    // We'll use this to rename the register after we've dealt with the def.
    std::set<MachineInstr*> uses;
    for (MachineRegisterInfo::use_iterator
         useItr = mri->use_begin(li->reg), useEnd = mri->use_end();
         useItr != useEnd; ++useItr) {
      uses.insert(&*useItr);
    }

    // Process the def instruction for this VNI.
    if (newVNI->isPHIDef()) {
      // Insert a copy at the start of the MBB. The range preceding the
      // copy will be attached to the original LiveInterval.
      MachineBasicBlock *defMBB = lis->getMBBFromIndex(newVNI->def);
      MachineInstr *copyMI = BuildMI(*defMBB, defMBB->begin(), DebugLoc(),
                                     tii->get(TargetOpcode::COPY), newVReg)
                               .addReg(li->reg, RegState::Kill);
      SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
      SlotIndex phiDefIdx = lis->getMBBStartIdx(defMBB);
      assert(lis->getInstructionFromIndex(phiDefIdx) == 0 &&
             "PHI def index points at actual instruction.");
      // The original interval keeps a PHI-def value covering
      // [block start, copy def).
      VNInfo *phiDefVNI = li->getNextValue(phiDefIdx,
                                           0, lis->getVNInfoAllocator());
      phiDefVNI->setIsPHIDef(true);
      li->addRange(LiveRange(phiDefVNI->def, copyIdx.getDefIndex(), phiDefVNI));
      LiveRange *oldPHIDefRange =
        newLI->getLiveRangeContaining(lis->getMBBStartIdx(defMBB));

      // If the old phi def starts in the middle of the range chop it up.
      if (oldPHIDefRange->start < lis->getMBBStartIdx(defMBB)) {
        LiveRange oldPHIDefRange2(copyIdx.getDefIndex(), oldPHIDefRange->end,
                                  oldPHIDefRange->valno);
        oldPHIDefRange->end = lis->getMBBStartIdx(defMBB);
        newLI->addRange(oldPHIDefRange2);
      } else if (oldPHIDefRange->start == lis->getMBBStartIdx(defMBB)) {
        // Otherwise if it's at the start of the range just trim it.
        oldPHIDefRange->start = copyIdx.getDefIndex();
      } else {
        assert(false && "PHI def range doesn't cover PHI def?");
      }

      // The extracted value is now defined by the copy, not by a PHI.
      newVNI->def = copyIdx.getDefIndex();
      newVNI->setCopy(copyMI);
      newVNI->setIsPHIDef(false); // not a PHI def anymore.
    } else {
      // non-PHI def. Rename the def. If it's two-addr that means renaming the
      // use and inserting a new copy too.
      MachineInstr *defInst = lis->getInstructionFromIndex(newVNI->def);
      // We'll rename this now, so we can remove it from uses.
      uses.erase(defInst);
      unsigned defOpIdx = defInst->findRegisterDefOperandIdx(li->reg);
      bool isTwoAddr = defInst->isRegTiedToUseOperand(defOpIdx),
        twoAddrUseIsUndef = false;

      // Rewrite defs (and, for two-address instructions, the tied uses too)
      // of the old register to the new vreg.
      for (unsigned i = 0; i < defInst->getNumOperands(); ++i) {
        MachineOperand &mo = defInst->getOperand(i);
        if (mo.isReg() && (mo.isDef() || isTwoAddr) && (mo.getReg()==li->reg)) {
          mo.setReg(newVReg);
          if (isTwoAddr && mo.isUse() && mo.isUndef())
            twoAddrUseIsUndef = true;
        }
      }

      SlotIndex defIdx = lis->getInstructionIndex(defInst);
      newVNI->def = defIdx.getDefIndex();

      if (isTwoAddr && !twoAddrUseIsUndef) {
        // The tied use read the old register's value: insert a copy from the
        // old reg into the new vreg just before the def instruction, and give
        // the new interval a range bridging the copy to the def.
        MachineBasicBlock *defMBB = defInst->getParent();
        MachineInstr *copyMI = BuildMI(*defMBB, defInst, DebugLoc(),
                                       tii->get(TargetOpcode::COPY), newVReg)
                                 .addReg(li->reg, RegState::Kill);
        SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
        LiveRange *origUseRange =
          li->getLiveRangeContaining(newVNI->def.getUseIndex());
        origUseRange->end = copyIdx.getDefIndex();
        VNInfo *copyVNI = newLI->getNextValue(copyIdx.getDefIndex(), copyMI,
                                              lis->getVNInfoAllocator());
        LiveRange copyRange(copyIdx.getDefIndex(),defIdx.getDefIndex(),copyVNI);
        newLI->addRange(copyRange);
      }
    }

    // Rename every collected use that falls inside the extracted value's new
    // live ranges.
    for (std::set<MachineInstr*>::iterator
         usesItr = uses.begin(), usesEnd = uses.end();
         usesItr != usesEnd; ++usesItr) {
      MachineInstr *useInst = *usesItr;
      SlotIndex useIdx = lis->getInstructionIndex(useInst);
      LiveRange *useRange =
        newLI->getLiveRangeContaining(useIdx.getUseIndex());

      // If this use doesn't belong to the new interval skip it.
      if (useRange == 0)
        continue;

      // This use doesn't belong to the VNI, skip it.
      if (useRange->valno != newVNI)
        continue;

      // Check if this instr is two address.
      unsigned useOpIdx = useInst->findRegisterUseOperandIdx(li->reg);
      bool isTwoAddress = useInst->isRegTiedToDefOperand(useOpIdx);

      // Rename uses (and defs for two-address instrs).
      for (unsigned i = 0; i < useInst->getNumOperands(); ++i) {
        MachineOperand &mo = useInst->getOperand(i);
        if (mo.isReg() && (mo.isUse() || isTwoAddress) &&
            (mo.getReg() == li->reg)) {
          mo.setReg(newVReg);
        }
      }

      // If this is a two address instruction we've got some extra work to do.
      if (isTwoAddress) {
        // We modified the def operand, so we need to copy back to the original
        // reg.
        MachineBasicBlock *useMBB = useInst->getParent();
        MachineBasicBlock::iterator useItr(useInst);
        MachineInstr *copyMI = BuildMI(*useMBB, llvm::next(useItr), DebugLoc(),
                                       tii->get(TargetOpcode::COPY), newVReg)
                                 .addReg(li->reg, RegState::Kill);
        SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);

        // Change the old two-address defined range & vni to start at
        // (and be defined by) the copy.
        LiveRange *origDefRange =
          li->getLiveRangeContaining(useIdx.getDefIndex());
        origDefRange->start = copyIdx.getDefIndex();
        origDefRange->valno->def = copyIdx.getDefIndex();
        origDefRange->valno->setCopy(copyMI);

        // Insert a new range & vni for the two-address-to-copy value. This
        // will be attached to the new live interval.
        VNInfo *copyVNI =
          newLI->getNextValue(useIdx.getDefIndex(), 0,
                              lis->getVNInfoAllocator());
        LiveRange copyRange(useIdx.getDefIndex(),copyIdx.getDefIndex(),copyVNI);
        newLI->addRange(copyRange);
      }
    }

    // Iterate over any PHI kills - we'll need to insert new copies for them.
    // Each range of the extracted value ending at a block boundary gets a
    // copy back into the original register before the block's terminators.
    for (LiveInterval::iterator LRI = newLI->begin(), LRE = newLI->end();
         LRI != LRE; ++LRI) {
      if (LRI->valno != newVNI)
        continue;
      SlotIndex killIdx = LRI->end;
      MachineBasicBlock *killMBB = lis->getMBBFromIndex(killIdx);
      MachineInstr *copyMI = BuildMI(*killMBB, killMBB->getFirstTerminator(),
                                     DebugLoc(), tii->get(TargetOpcode::COPY),
                                     li->reg)
                               .addReg(newVReg, RegState::Kill);
      SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);

      // Save the current end. We may need it to add a new range if the
      // current range runs off the end of the MBB.
      SlotIndex newKillRangeEnd = LRI->end;
      LRI->end = copyIdx.getDefIndex();

      if (newKillRangeEnd != lis->getMBBEndIdx(killMBB)) {
        assert(newKillRangeEnd > lis->getMBBEndIdx(killMBB) &&
               "PHI kill range doesn't reach kill-block end. Not sane.");
        newLI->addRange(LiveRange(lis->getMBBEndIdx(killMBB),
                                  newKillRangeEnd, newVNI));
      }

      // The original interval takes over from the copy to the block end.
      VNInfo *newKillVNI = li->getNextValue(copyIdx.getDefIndex(),
                                            copyMI, lis->getVNInfoAllocator());
      newKillVNI->setHasPHIKill(true);
      li->addRange(LiveRange(copyIdx.getDefIndex(),
                             lis->getMBBEndIdx(killMBB),
                             newKillVNI));
    }
    // The PHI kills now belong to the original interval's values.
    newVNI->setHasPHIKill(false);

    return newLI;
  }

};

} // end anonymous namespace


namespace llvm {
// Defined elsewhere (the inline spiller's own translation unit).
Spiller *createInlineSpiller(MachineFunctionPass &pass,
                             MachineFunction &mf,
                             VirtRegMap &vrm);
}

/// Factory: construct the spiller selected by the -spiller command line
/// option (standard by default).
llvm::Spiller* llvm::createSpiller(MachineFunctionPass &pass,
                                   MachineFunction &mf,
                                   VirtRegMap &vrm) {
  switch (spillerOpt) {
  default: assert(0 && "unknown spiller");
  case trivial: return new TrivialSpiller(pass, mf, vrm);
  case standard: return new StandardSpiller(pass, mf, vrm);
  case splitting: return new SplittingSpiller(pass, mf, vrm);
  case inline_: return createInlineSpiller(pass, mf, vrm);
  }
}