LiveIntervalAnalysis.cpp revision 3749943648772743c9c0e852553e50e6700a0c1b
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;

// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
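  // (The VNInfo objects themselves live in the allocator's memory, which is
  // why they can be reclaimed in bulk here instead of per-interval.)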
  VNInfoAllocator.DestroyAll();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

/// runOnMachineFunction - Register allocate the whole function
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << "BB#" << mbbi->getNumber()
       << ":\t\t# derived from " << mbbi->getName() << "\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      if (mii->isDebugValue())
        OS << "    \t" << *mii;
      else
        OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}

bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries.
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();

  // Skip deleted instructions.
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find last instruction in range.
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators.
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg.
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      if (SrcReg == li.reg || DstReg == li.reg)
        continue;

    // Check for operands using reg.
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}

/// conflictsWithSubPhysRegRef - Similar to conflictsWithPhysReg except
/// it checks for sub-register references, and it can check uses as well.
bool LiveIntervals::conflictsWithSubPhysRegRef(LiveInterval &li,
                                               unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

#ifndef NDEBUG
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    dbgs() << tri_->getName(reg);
  else
    dbgs() << "%reg" << reg;
}
#endif

static
bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
  unsigned Reg = MI.getOperand(MOIdx).getReg();
  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg())
      continue;
    if (MO.getReg() == Reg && MO.isDef()) {
      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
             MI.getOperand(MOIdx).getSubReg() &&
             MO.getSubReg());
      return true;
    }
  }
  return false;
}

/// isPartialRedef - Return true if the specified def at the specified index is
/// partially re-defining the specified live interval. A common case of this is
/// a definition of the sub-register.
bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                                   LiveInterval &interval) {
  if (!MO.getSubReg() || MO.isEarlyClobber())
    return false;

  SlotIndex RedefIndex = MIIdx.getDefIndex();
  const LiveRange *OldLR =
    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
  if (OldLR->valno->isDefAccurate()) {
    MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
  }
  return false;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
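  // For example, after phi elimination a vreg may carry one def per
  // predecessor copy:
  //   BB#1: %reg1024<def> = ...
  //   BB#2: %reg1024<def> = ...
  // Only the first def seen builds the interval from scratch; later defs
  // fall into the non-empty case further down.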
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;

    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true,
                                          VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
             E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock),
                     ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create an interval with a NEW value number. Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
                                      VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.
      return;

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.

    // It may also be a partial redef like this:
    // 80  %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert((PartReDef || interval.containsOneValue()) &&
             "Unexpected 2-addr liveint!");
      unsigned NumVals = interval.getNumValNums();
      SlotIndex DefIndex = interval.getValNumInfo(NumVals-1)->def.getDefIndex();
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
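      // The range added below therefore runs from this def to the end of the
      // MBB, and the kill is recorded at the terminator gap.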

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    } else {
      llvm_unreachable("Multiply defined register");
    }
  }

  DEBUG(dbgs() << '\n');
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here without a killing
  // instruction where we know it's dead is if it is live-in to the function
  // and never used. Another possible case is that the implicit use of the
  // physical register has been deleted by the two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
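  // A live range containing 'start' can already exist if another def of this
  // physreg (e.g. a sub-register def or an earlyclobber redef) was processed
  // first; in that case reuse its value number instead of creating a new one.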
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg() ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      dbgs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills; if the register reaches a def before it's killed, it
  // shouldn't be considered live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUE's.
      return;
  }

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // Live-in register might not be used at all.
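  // If no def or use was seen, an alias of a live-in is treated as dead at
  // the block start, while the register itself is treated as live through
  // the scanned range.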
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
}

/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def's that define values which are live-out of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->getCopy())
    return 0;

  if (VNI->getCopy()->isExtractSubreg()) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
      unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
      if (SrcSubReg == DstSubReg)
        // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
        // reg1034 can still be coalesced to EDX.
        return Reg;
      assert(DstSubReg == 0);
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    }
    return Reg;
  } else if (VNI->getCopy()->isInsertSubreg() ||
             VNI->getCopy()->isSubregToReg())
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
         ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the definition MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}


/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the ith operand of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
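  // Look through MI's virtual register operands for one whose def has been
  // rematerialized; the recorded remat clone may still reference li.reg and
  // must be updated to the replacement vreg below.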
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
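    // First gather every operand index of MI that refers to this vreg so the
    // whole group can be rewritten (and possibly folded) together.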

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        if (!MOj.isUndef()) {
          HasUse |= MOj.isUse();
          HasDef |= MOj.isDef();
        }
      }
    }

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}

bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  SlotIndex End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    if (VNI->kills[j].isPHI())
      continue;

    SlotIndex KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx <= End)
      return true;
  }
  return false;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the defs / uses in this live range that will be
  // rewritten. Make sure they are sorted according to instruction index.
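  // Note: the reg_iterator is advanced before each instruction is inspected
  // so that erasing DBG_VALUEs inside the loop does not invalidate it.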
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
        uint64_t Offset = MI->getOperand(1).getImm();
        const MDNode *MDPtr = MI->getOperand(2).getMetadata();
        DebugLoc DL = MI->getDebugLoc();
        int FI = isLoadSS ? LdSlot : (int)Slot;
        if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
                                                          Offset, MDPtr, DL)) {
          DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
          ReplaceMachineInstrInMaps(MI, NewDV);
          MachineBasicBlock *MBB = MI->getParent();
          MBB->insert(MBB->erase(MI), NewDV);
          continue;
        }
      }

      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
      continue;
    }
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Prevent interval
      // from being spilled.
      getInterval(ImpUse).markNotSpillable();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
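        // A def with no use at this index starts a fresh value, so drop the
        // cached vreg for this MBB and force a new interval below.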
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                           index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                           Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                           CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                           ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.markNotSpillable();
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
          // If this is a two-address instruction, then this index starts a
          // new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) follow the last def; it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
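        // Record the restore point, creating this MBB's restore list on
        // first use.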
1530 if (RII == RestoreIdxes.end()) { 1531 std::vector<SRInfo> Infos; 1532 Infos.push_back(SRInfo(index, NewVReg, true)); 1533 RestoreIdxes.insert(std::make_pair(MBBId, Infos)); 1534 } else { 1535 RII->second.push_back(SRInfo(index, NewVReg, true)); 1536 } 1537 RestoreMBBs.set(MBBId); 1538 } 1539 } 1540 1541 // Update spill weight. 1542 unsigned loopDepth = loopInfo->getLoopDepth(MBB); 1543 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth); 1544 } 1545 1546 if (NewVReg && TrySplit && AllCanFold) { 1547 // If all of its def / use can be folded, give it a low spill weight. 1548 LiveInterval &nI = getOrCreateInterval(NewVReg); 1549 nI.weight /= 10.0F; 1550 } 1551} 1552 1553bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index, 1554 unsigned vr, BitVector &RestoreMBBs, 1555 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) { 1556 if (!RestoreMBBs[Id]) 1557 return false; 1558 std::vector<SRInfo> &Restores = RestoreIdxes[Id]; 1559 for (unsigned i = 0, e = Restores.size(); i != e; ++i) 1560 if (Restores[i].index == index && 1561 Restores[i].vreg == vr && 1562 Restores[i].canFold) 1563 return true; 1564 return false; 1565} 1566 1567void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index, 1568 unsigned vr, BitVector &RestoreMBBs, 1569 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) { 1570 if (!RestoreMBBs[Id]) 1571 return; 1572 std::vector<SRInfo> &Restores = RestoreIdxes[Id]; 1573 for (unsigned i = 0, e = Restores.size(); i != e; ++i) 1574 if (Restores[i].index == index && Restores[i].vreg) 1575 Restores[i].index = SlotIndex(); 1576} 1577 1578/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being 1579/// spilled and create empty intervals for their uses. 1580void 1581LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm, 1582 const TargetRegisterClass* rc, 1583 std::vector<LiveInterval*> &NewLIs) { 1584 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg), 1585 re = mri_->reg_end(); ri != re; ) { 1586 MachineOperand &O = ri.getOperand(); 1587 MachineInstr *MI = &*ri; 1588 ++ri; 1589 if (MI->isDebugValue()) { 1590 // Remove debug info for now. 1591 O.setReg(0U); 1592 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI); 1593 continue; 1594 } 1595 if (O.isDef()) { 1596 assert(MI->isImplicitDef() && 1597 "Register def was not rewritten?"); 1598 RemoveMachineInstrFromMaps(MI); 1599 vrm.RemoveMachineInstrFromMaps(MI); 1600 MI->eraseFromParent(); 1601 } else { 1602 // This must be an use of an implicit_def so it's not part of the live 1603 // interval. Create a new empty live interval for it. 1604 // FIXME: Can we simply erase some of the instructions? e.g. Stores? 1605 unsigned NewVReg = mri_->createVirtualRegister(rc); 1606 vrm.grow(); 1607 vrm.setIsImplicitlyDefined(NewVReg); 1608 NewLIs.push_back(&getOrCreateInterval(NewVReg)); 1609 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { 1610 MachineOperand &MO = MI->getOperand(i); 1611 if (MO.isReg() && MO.getReg() == li.reg) { 1612 MO.setReg(NewVReg); 1613 MO.setIsUndef(); 1614 } 1615 } 1616 } 1617 } 1618} 1619 1620float 1621LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) { 1622 // Limit the loop depth ridiculousness. 1623 if (loopDepth > 200) 1624 loopDepth = 200; 1625 1626 // The loop depth is used to roughly estimate the number of times the 1627 // instruction is executed. Something like 10^d is simple, but will quickly 1628 // overflow a float. 
1629  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
1630  // headroom before overflow.
1631  float lc = powf(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
1632
1633  return (isDef + isUse) * lc;
1634}
1635
1636void
1637LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
1638  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1639    normalizeSpillWeight(*NewLIs[i]);
1640}
1641
1642std::vector<LiveInterval*> LiveIntervals::
1643addIntervalsForSpillsFast(const LiveInterval &li,
1644                          const MachineLoopInfo *loopInfo,
1645                          VirtRegMap &vrm) {
1646  unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1647
1648  std::vector<LiveInterval*> added;
1649
1650  assert(li.isSpillable() && "attempt to spill already spilled interval!");
1651
1652  DEBUG({
1653      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1654      li.dump();
1655      dbgs() << '\n';
1656    });
1657
1658  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1659
1660  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1661  while (RI != mri_->reg_end()) {
1662    MachineInstr* MI = &*RI;
1663
1664    SmallVector<unsigned, 2> Indices;
1665    bool HasUse = false;
1666    bool HasDef = false;
1667
1668    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1669      MachineOperand& mop = MI->getOperand(i);
1670      if (!mop.isReg() || mop.getReg() != li.reg) continue;
1671
1672      HasUse |= MI->getOperand(i).isUse();
1673      HasDef |= MI->getOperand(i).isDef();
1674
1675      Indices.push_back(i);
1676    }
1677
1678    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1679                              Indices, true, slot, li.reg)) {
1680      unsigned NewVReg = mri_->createVirtualRegister(rc);
1681      vrm.grow();
1682      vrm.assignVirt2StackSlot(NewVReg, slot);
1683
1684      // create a new register for this spill
1685      LiveInterval &nI = getOrCreateInterval(NewVReg);
1686      nI.markNotSpillable();
1687
1688      // Rewrite register operands to use the new vreg.
1689      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1690             E = Indices.end(); I != E; ++I) {
1691        MI->getOperand(*I).setReg(NewVReg);
1692
1693        if (MI->getOperand(*I).isUse())
1694          MI->getOperand(*I).setIsKill(true);
1695      }
1696
1697      // Fill in the new live interval.
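      // (Explanatory note: every instruction's SlotIndex carries load, use,
      // def, and store sub-slots. The two ranges built below encode a restore
      // as live from the load slot to the use slot, and a spilled def as live
      // from the def slot to the store slot of this instruction.)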
1698      SlotIndex index = getInstructionIndex(MI);
1699      if (HasUse) {
1700        LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
1701                     nI.getNextValue(SlotIndex(), 0, false,
1702                                     getVNInfoAllocator()));
1703        DEBUG(dbgs() << " +" << LR);
1704        nI.addRange(LR);
1705        vrm.addRestorePoint(NewVReg, MI);
1706      }
1707      if (HasDef) {
1708        LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1709                     nI.getNextValue(SlotIndex(), 0, false,
1710                                     getVNInfoAllocator()));
1711        DEBUG(dbgs() << " +" << LR);
1712        nI.addRange(LR);
1713        vrm.addSpillPoint(NewVReg, true, MI);
1714      }
1715
1716      added.push_back(&nI);
1717
1718      DEBUG({
1719          dbgs() << "\t\t\t\tadded new interval: ";
1720          nI.dump();
1721          dbgs() << '\n';
1722        });
1723    }
1724
1725
1726    RI = mri_->reg_begin(li.reg);
1727  }
1728
1729  return added;
1730}
1731
1732std::vector<LiveInterval*> LiveIntervals::
1733addIntervalsForSpills(const LiveInterval &li,
1734                      SmallVectorImpl<LiveInterval*> &SpillIs,
1735                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1736
1737  if (EnableFastSpilling)
1738    return addIntervalsForSpillsFast(li, loopInfo, vrm);
1739
1740  assert(li.isSpillable() && "attempt to spill already spilled interval!");
1741
1742  DEBUG({
1743      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1744      li.print(dbgs(), tri_);
1745      dbgs() << '\n';
1746    });
1747
1748  // Each bit specifies whether a spill is required in the MBB.
1749  BitVector SpillMBBs(mf_->getNumBlockIDs());
1750  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1751  BitVector RestoreMBBs(mf_->getNumBlockIDs());
1752  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1753  DenseMap<unsigned,unsigned> MBBVRegsMap;
1754  std::vector<LiveInterval*> NewLIs;
1755  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1756
1757  unsigned NumValNums = li.getNumValNums();
1758  SmallVector<MachineInstr*, 4> ReMatDefs;
1759  ReMatDefs.resize(NumValNums, NULL);
1760  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1761  ReMatOrigDefs.resize(NumValNums, NULL);
1762  SmallVector<int, 4> ReMatIds;
1763  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1764  BitVector ReMatDelete(NumValNums);
1765  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1766
1767  // Spilling a split live interval. It cannot be split any further. Also,
1768  // it's guaranteed to be a single val# / range interval.
1769  if (vrm.getPreSplitReg(li.reg)) {
1770    vrm.setIsSplitFromReg(li.reg, 0);
1771    // Unset the split kill marker on the last use.
1772    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1773    if (KillIdx != SlotIndex()) {
1774      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1775      assert(KillMI && "Last use disappeared?");
1776      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1777      assert(KillOp != -1 && "Last use disappeared?");
1778      KillMI->getOperand(KillOp).setIsKill(false);
1779    }
1780    vrm.removeKillPoint(li.reg);
1781    bool DefIsReMat = vrm.isReMaterialized(li.reg);
1782    Slot = vrm.getStackSlot(li.reg);
1783    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1784    MachineInstr *ReMatDefMI = DefIsReMat ?
1785      vrm.getReMaterializedMI(li.reg) : NULL;
1786    int LdSlot = 0;
1787    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1788    bool isLoad = isLoadSS ||
1789      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1790    bool IsFirstRange = true;
1791    for (LiveInterval::Ranges::const_iterator
1792           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1793      // If this is a split live interval with multiple ranges, it means there
1794      // are two-address instructions that re-defined the value. Only the
1795      // first def can be rematerialized!
1796      if (IsFirstRange) {
1797        // Note ReMatOrigDefMI has already been deleted.
1798        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1799                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1800                             false, vrm, rc, ReMatIds, loopInfo,
1801                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1802                             MBBVRegsMap, NewLIs);
1803      } else {
1804        rewriteInstructionsForSpills(li, false, I, NULL, 0,
1805                             Slot, 0, false, false, false,
1806                             false, vrm, rc, ReMatIds, loopInfo,
1807                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1808                             MBBVRegsMap, NewLIs);
1809      }
1810      IsFirstRange = false;
1811    }
1812
1813    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1814    normalizeSpillWeights(NewLIs);
1815    return NewLIs;
1816  }
1817
1818  bool TrySplit = !intervalIsInOneMBB(li);
1819  if (TrySplit)
1820    ++numSplits;
1821  bool NeedStackSlot = false;
1822  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1823       i != e; ++i) {
1824    const VNInfo *VNI = *i;
1825    unsigned VN = VNI->id;
1826    if (VNI->isUnused())
1827      continue; // Dead val#.
1828    // Is the def for the val# rematerializable?
1829    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
1830      ? getInstructionFromIndex(VNI->def) : 0;
1831    bool dummy;
1832    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1833      // Remember how to remat the def of this val#.
1834      ReMatOrigDefs[VN] = ReMatDefMI;
1835      // Original def may be modified, so we have to make a copy here.
1836      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1837      CloneMIs.push_back(Clone);
1838      ReMatDefs[VN] = Clone;
1839
1840      bool CanDelete = true;
1841      if (VNI->hasPHIKill()) {
1842        // The value is killed by a PHI node; not all of its uses can be
1843        // rematerialized, so it must not be deleted.
1844        CanDelete = false;
1845        // Need a stack slot if there is any live range where uses cannot be
1846        // rematerialized.
1847        NeedStackSlot = true;
1848      }
1849      if (CanDelete)
1850        ReMatDelete.set(VN);
1851    } else {
1852      // Need a stack slot if there is any live range where uses cannot be
1853      // rematerialized.
1854      NeedStackSlot = true;
1855    }
1856  }
1857
1858  // One stack slot per live interval.
1859  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1860    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1861      Slot = vrm.assignVirt2StackSlot(li.reg);
1862
1863    // This case only occurs when the prealloc splitter has already assigned
1864    // a stack slot to this vreg.
1865    else
1866      Slot = vrm.getStackSlot(li.reg);
1867  }
1868
1869  // Create new intervals and rewrite defs and uses.
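  // (Explanatory note: each range is processed with the remat bookkeeping
  // gathered above. ReMatDefs, ReMatOrigDefs, and ReMatDelete are all indexed
  // by the range's value number, I->valno->id, so every range belonging to
  // the same val# sees the same rematerialization decision.)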
1870  for (LiveInterval::Ranges::const_iterator
1871         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1872    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1873    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1874    bool DefIsReMat = ReMatDefMI != NULL;
1875    bool CanDelete = ReMatDelete[I->valno->id];
1876    int LdSlot = 0;
1877    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1878    bool isLoad = isLoadSS ||
1879      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1880    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1881                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1882                         CanDelete, vrm, rc, ReMatIds, loopInfo,
1883                         SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1884                         MBBVRegsMap, NewLIs);
1885  }
1886
1887  // Insert spills / restores if we are splitting.
1888  if (!TrySplit) {
1889    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1890    normalizeSpillWeights(NewLIs);
1891    return NewLIs;
1892  }
1893
1894  SmallPtrSet<LiveInterval*, 4> AddedKill;
1895  SmallVector<unsigned, 2> Ops;
1896  if (NeedStackSlot) {
1897    int Id = SpillMBBs.find_first();
1898    while (Id != -1) {
1899      std::vector<SRInfo> &spills = SpillIdxes[Id];
1900      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1901        SlotIndex index = spills[i].index;
1902        unsigned VReg = spills[i].vreg;
1903        LiveInterval &nI = getOrCreateInterval(VReg);
1904        bool isReMat = vrm.isReMaterialized(VReg);
1905        MachineInstr *MI = getInstructionFromIndex(index);
1906        bool CanFold = false;
1907        bool FoundUse = false;
1908        Ops.clear();
1909        if (spills[i].canFold) {
1910          CanFold = true;
1911          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1912            MachineOperand &MO = MI->getOperand(j);
1913            if (!MO.isReg() || MO.getReg() != VReg)
1914              continue;
1915
1916            Ops.push_back(j);
1917            if (MO.isDef())
1918              continue;
1919            if (isReMat ||
1920                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1921                                                RestoreMBBs, RestoreIdxes))) {
1922              // MI has two-address uses of the same register. If the use
1923              // isn't the first and only use in the BB, then we can't fold
1924              // it. FIXME: Move this to rewriteInstructionsForSpills.
1925              CanFold = false;
1926              break;
1927            }
1928            FoundUse = true;
1929          }
1930        }
1931        // Fold the store into the def if possible.
1932        bool Folded = false;
1933        if (CanFold && !Ops.empty()) {
1934          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg)) {
1935            Folded = true;
1936            if (FoundUse) {
1937              // Also folded the use(s); do not issue a load.
1938              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1939              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1940            }
1941            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1942          }
1943        }
1944
1945        // Otherwise tell the spiller to issue a spill.
1946        if (!Folded) {
1947          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1948          bool isKill = LR->end == index.getStoreIndex();
1949          if (!MI->registerDefIsDead(nI.reg))
1950            // No need to spill a dead def.
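            // (Explanatory note: isKill means nI's last range ends exactly at
            // this store slot, so the spill is the value's final use. Such
            // intervals are recorded in AddedKill below, which keeps the
            // finalization loop from marking a second kill point for them.)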
1951            vrm.addSpillPoint(VReg, isKill, MI);
1952          if (isKill)
1953            AddedKill.insert(&nI);
1954        }
1955      }
1956      Id = SpillMBBs.find_next(Id);
1957    }
1958  }
1959
1960  int Id = RestoreMBBs.find_first();
1961  while (Id != -1) {
1962    std::vector<SRInfo> &restores = RestoreIdxes[Id];
1963    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1964      SlotIndex index = restores[i].index;
1965      if (index == SlotIndex())
1966        continue;
1967      unsigned VReg = restores[i].vreg;
1968      LiveInterval &nI = getOrCreateInterval(VReg);
1969      bool isReMat = vrm.isReMaterialized(VReg);
1970      MachineInstr *MI = getInstructionFromIndex(index);
1971      bool CanFold = false;
1972      Ops.clear();
1973      if (restores[i].canFold) {
1974        CanFold = true;
1975        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1976          MachineOperand &MO = MI->getOperand(j);
1977          if (!MO.isReg() || MO.getReg() != VReg)
1978            continue;
1979
1980          if (MO.isDef()) {
1981            // If this restore were to be folded, it would have been folded
1982            // already.
1983            CanFold = false;
1984            break;
1985          }
1986          Ops.push_back(j);
1987        }
1988      }
1989
1990      // Fold the load into the use if possible.
1991      bool Folded = false;
1992      if (CanFold && !Ops.empty()) {
1993        if (!isReMat)
1994          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg);
1995        else {
1996          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1997          int LdSlot = 0;
1998          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1999          // If the rematerializable def is a load, also try to fold it.
2000          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2001            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2002                                          Ops, isLoadSS, LdSlot, VReg);
2003          if (!Folded) {
2004            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2005            if (ImpUse) {
2006              // Re-matting an instruction with a virtual register use. Add
2007              // the register as an implicit use on the use MI and mark the
2008              // register interval as unspillable.
2009              LiveInterval &ImpLi = getInterval(ImpUse);
2010              ImpLi.markNotSpillable();
2011              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2012            }
2013          }
2014        }
2015      }
2016      // If folding is not possible / failed, then tell the spiller to issue a
2017      // load / rematerialization for us.
2018      if (Folded)
2019        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
2020      else
2021        vrm.addRestorePoint(VReg, MI);
2022    }
2023    Id = RestoreMBBs.find_next(Id);
2024  }
2025
2026  // Finalize intervals: add kills, finalize spill weights, and filter out
2027  // dead intervals.
2028  std::vector<LiveInterval*> RetNewLIs;
2029  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2030    LiveInterval *LI = NewLIs[i];
2031    if (!LI->empty()) {
2032      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
2033      if (!AddedKill.count(LI)) {
2034        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2035        SlotIndex LastUseIdx = LR->end.getBaseIndex();
2036        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2037        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2038        assert(UseIdx != -1);
2039        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2040          LastUse->getOperand(UseIdx).setIsKill();
2041          vrm.addKillPoint(LI->reg, LastUseIdx);
2042        }
2043      }
2044      RetNewLIs.push_back(LI);
2045    }
2046  }
2047
2048  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2049  normalizeSpillWeights(RetNewLIs);
2050  return RetNewLIs;
2051}
2052
2053/// hasAllocatableSuperReg - Return true if the specified physical register has
2054/// any super register that's allocatable.
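/// For example (illustrative only, assuming an x86 target): calling this on
/// AX returns true when a super-register such as EAX is both in the
/// allocatable set and currently has a live interval.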
2055bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2056  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2057    if (allocatableRegs_[*AS] && hasInterval(*AS))
2058      return true;
2059  return false;
2060}
2061
2062/// getRepresentativeReg - Find the largest super register of the specified
2063/// physical register.
2064unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2065  // Find the largest super-register that is allocatable.
2066  unsigned BestReg = Reg;
2067  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2068    unsigned SuperReg = *AS;
2069    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2070      BestReg = SuperReg;
2071      break;
2072    }
2073  }
2074  return BestReg;
2075}
2076
2077/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2078/// specified interval that conflict with the specified physical register.
2079unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2080                                                   unsigned PhysReg) const {
2081  unsigned NumConflicts = 0;
2082  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2083  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2084         E = mri_->reg_end(); I != E; ++I) {
2085    MachineOperand &O = I.getOperand();
2086    MachineInstr *MI = O.getParent();
2087    if (MI->isDebugValue())
2088      continue;
2089    SlotIndex Index = getInstructionIndex(MI);
2090    if (pli.liveAt(Index))
2091      ++NumConflicts;
2092  }
2093  return NumConflicts;
2094}
2095
2096/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2097/// around all defs and uses of the specified interval. Return true if it
2098/// was able to cut its interval.
2099bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2100                                                  unsigned PhysReg, VirtRegMap &vrm) {
2101  unsigned SpillReg = getRepresentativeReg(PhysReg);
2102
2103  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2104    // If there is a register which aliases PhysReg but is not a sub-register
2105    // of the chosen representative super register, assert, since we can't
2106    // handle it yet.
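    // (Illustrative case, assuming 32-bit x86: for PhysReg == AX with the
    // representative SpillReg == EAX, the remaining aliases AL and AH are
    // sub-registers of EAX, so the assert below holds.)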
2107    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2108           tri_->isSuperRegister(*AS, SpillReg));
2109
2110  bool Cut = false;
2111  SmallVector<unsigned, 4> PRegs;
2112  if (hasInterval(SpillReg))
2113    PRegs.push_back(SpillReg);
2114  else {
2115    SmallSet<unsigned, 4> Added;
2116    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
2117      if (Added.insert(*AS) && hasInterval(*AS)) {
2118        PRegs.push_back(*AS);
2119        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
2120          Added.insert(*ASS);
2121      }
2122  }
2123
2124  SmallPtrSet<MachineInstr*, 8> SeenMIs;
2125  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2126         E = mri_->reg_end(); I != E; ++I) {
2127    MachineOperand &O = I.getOperand();
2128    MachineInstr *MI = O.getParent();
2129    if (MI->isDebugValue() || SeenMIs.count(MI))
2130      continue;
2131    SeenMIs.insert(MI);
2132    SlotIndex Index = getInstructionIndex(MI);
2133    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2134      unsigned PReg = PRegs[i];
2135      LiveInterval &pli = getInterval(PReg);
2136      if (!pli.liveAt(Index))
2137        continue;
2138      vrm.addEmergencySpill(PReg, MI);
2139      SlotIndex StartIdx = Index.getLoadIndex();
2140      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
2141      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2142        pli.removeRange(StartIdx, EndIdx);
2143        Cut = true;
2144      } else {
2145        std::string msg;
2146        raw_string_ostream Msg(msg);
2147        Msg << "Ran out of registers during register allocation!";
2148        if (MI->isInlineAsm()) {
2149          Msg << "\nPlease check your inline asm statement for invalid "
2150              << "constraints:\n";
2151          MI->print(Msg, tm_);
2152        }
2153        report_fatal_error(Msg.str());
2154      }
2155      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
2156        if (!hasInterval(*AS))
2157          continue;
2158        LiveInterval &spli = getInterval(*AS);
2159        if (spli.liveAt(Index))
2160          spli.removeRange(Index.getLoadIndex(),
2161                           Index.getNextIndex().getBaseIndex());
2162      }
2163    }
2164  }
2165  return Cut;
2166}
2167
2168LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2169                                                  MachineInstr* startInst) {
2170  LiveInterval& Interval = getOrCreateInterval(reg);
2171  VNInfo* VN = Interval.getNextValue(
2172    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2173    startInst, true, getVNInfoAllocator());
2174  VN->setHasPHIKill(true);
2175  VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
2176  LiveRange LR(
2177    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2178    getMBBEndIdx(startInst->getParent()), VN);
2179  Interval.addRange(LR);
2180
2181  return LR;
2182}
2183
2184
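// Illustrative sketch (an assumption, not code from this revision): a client
// pass that inserts a copy instruction near the end of a block and needs its
// liveness registered could use addLiveRangeToEndOfBlock like this, where
// `LIs`, `CopyMI`, and `DstReg` are hypothetical names:
//
//   MachineInstr *CopyMI = ...;  // newly inserted instruction defining DstReg
//   LiveRange LR = LIs->addLiveRangeToEndOfBlock(DstReg, CopyMI);
//   // LR now covers [CopyMI's def slot, end of CopyMI's parent block), and
//   // the new value number is marked as killed by a PHI at the terminator gap.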