LiveIntervalAnalysis.cpp revision 1015ba7018c87f48cc7bb45a564eb4a27241e76a
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;

// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.DestroyAll();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

/// runOnMachineFunction - Register allocate the whole function
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << "BB#" << mbbi->getNumber()
       << ":\t\t# derived from " << mbbi->getName() << "\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      if (mii->isDebugValue())
        OS << " \t" << *mii;
      else
        OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}

bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();

  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      if (SrcReg == li.reg || DstReg == li.reg)
        continue;

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}

/// conflictsWithSubPhysRegRef - Similar to conflictsWithPhysRegRef except
/// it checks for sub-register reference and it can check use as well.
bool LiveIntervals::conflictsWithSubPhysRegRef(LiveInterval &li,
                                               unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

#ifndef NDEBUG
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    dbgs() << tri_->getName(reg);
  else
    dbgs() << "%reg" << reg;
}
#endif

static
bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
  unsigned Reg = MI.getOperand(MOIdx).getReg();
  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg())
      continue;
    if (MO.getReg() == Reg && MO.isDef()) {
      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
             MI.getOperand(MOIdx).getSubReg() &&
             MO.getSubReg());
      return true;
    }
  }
  return false;
}

/// isPartialRedef - Return true if the specified def at the specific index is
/// partially re-defining the specified live interval. A common case of this is
/// a definition of the sub-register.
bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                                   LiveInterval &interval) {
  if (!MO.getSubReg() || MO.isEarlyClobber())
    return false;

  SlotIndex RedefIndex = MIIdx.getDefIndex();
  const LiveRange *OldLR =
    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
  if (OldLR->valno->isDefAccurate()) {
    MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
  }
  return false;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();

    // Make sure the first definition is not a partial redefinition. Add an
    // <imp-def> of the full register.
    if (MO.getSubReg())
      mi->addRegisterDefined(interval.reg);

    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;

    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true,
                                          VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
             E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock),
                     ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create interval with one of a NEW value number. Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
                                      VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.
      return;

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.

    // It may also be a partial redef like this:
    // 80  %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert((PartReDef || interval.containsOneValue()) &&
             "Unexpected 2-addr liveint!");
      SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);

      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (PartReDef &&
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        OldValNo->setCopy(&*mi);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    } else {
      llvm_unreachable("Multiply defined register");
    }
  }

  DEBUG(dbgs() << '\n');
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it.
          // Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here without a killing
  // instruction or an instruction where we know it's dead is if it is live-in
  // to the function and never used. Another possible case is the implicit use
  // of the physical register has been deleted by the two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg() ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->definesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      dbgs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills. If it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUE's.
      return;
  }

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->definesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it.
      // Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
}

/// computeIntervals - computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
                   HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->getCopy())
    return 0;

  if (VNI->getCopy()->isExtractSubreg()) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
      unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
      if (SrcSubReg == DstSubReg)
        // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
        // reg1034 can still be coalesced to EDX.
        return Reg;
      assert(DstSubReg == 0);
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    }
    return Reg;
  } else if (VNI->getCopy()->isInsertSubreg() ||
             VNI->getCopy()->isSubregToReg())
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operand!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
         ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}


/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI.
/// If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        if (!MOj.isUndef()) {
          HasUse |= MOj.isUse();
          HasDef |= MOj.isDef();
        }
      }
    }

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use.
    // Add the register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  SlotIndex End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    if (VNI->kills[j].isPHI())
      continue;

    SlotIndex KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx <= End)
      return true;
  }
  return false;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
        uint64_t Offset = MI->getOperand(1).getImm();
        const MDNode *MDPtr = MI->getOperand(2).getMetadata();
        DebugLoc DL = MI->getDebugLoc();
        int FI = isLoadSS ? LdSlot : (int)Slot;
        if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
                                                           Offset, MDPtr, DL)) {
          DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
          ReplaceMachineInstrInMaps(MI, NewDV);
          MachineBasicBlock *MBB = MI->getParent();
          MBB->insert(MBB->erase(MI), NewDV);
          continue;
        }
      }

      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
      continue;
    }
    assert(!(O.isImplicit() && O.isUse()) &&
           "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Prevent interval
      // from being spilled.
      getInterval(ImpUse).markNotSpillable();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.markNotSpillable();
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
          // If this is two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) follow the last def; it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}

bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
                        unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
                        unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg)
      Restores[i].index = SlotIndex();
}

/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      O.setReg(0U);
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      continue;
    }
    if (O.isDef()) {
      assert(MI->isImplicitDef() &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}

float
LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
  // Limit the loop depth ridiculousness.
  if (loopDepth > 200)
    loopDepth = 200;

  // The loop depth is used to roughly estimate the number of times the
  // instruction is executed. Something like 10^d is simple, but will quickly
  // overflow a float.
This expression behaves like 10^d for small d, but is 1641 // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of 1642 // headroom before overflow. 1643 float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth); 1644 1645 return (isDef + isUse) * lc; 1646} 1647 1648void 1649LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) { 1650 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) 1651 normalizeSpillWeight(*NewLIs[i]); 1652} 1653 1654std::vector<LiveInterval*> LiveIntervals:: 1655addIntervalsForSpillsFast(const LiveInterval &li, 1656 const MachineLoopInfo *loopInfo, 1657 VirtRegMap &vrm) { 1658 unsigned slot = vrm.assignVirt2StackSlot(li.reg); 1659 1660 std::vector<LiveInterval*> added; 1661 1662 assert(li.isSpillable() && "attempt to spill already spilled interval!"); 1663 1664 DEBUG({ 1665 dbgs() << "\t\t\t\tadding intervals for spills for interval: "; 1666 li.dump(); 1667 dbgs() << '\n'; 1668 }); 1669 1670 const TargetRegisterClass* rc = mri_->getRegClass(li.reg); 1671 1672 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg); 1673 while (RI != mri_->reg_end()) { 1674 MachineInstr* MI = &*RI; 1675 1676 SmallVector<unsigned, 2> Indices; 1677 bool HasUse = false; 1678 bool HasDef = false; 1679 1680 for (unsigned i = 0; i != MI->getNumOperands(); ++i) { 1681 MachineOperand& mop = MI->getOperand(i); 1682 if (!mop.isReg() || mop.getReg() != li.reg) continue; 1683 1684 HasUse |= MI->getOperand(i).isUse(); 1685 HasDef |= MI->getOperand(i).isDef(); 1686 1687 Indices.push_back(i); 1688 } 1689 1690 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI), 1691 Indices, true, slot, li.reg)) { 1692 unsigned NewVReg = mri_->createVirtualRegister(rc); 1693 vrm.grow(); 1694 vrm.assignVirt2StackSlot(NewVReg, slot); 1695 1696 // create a new register for this spill 1697 LiveInterval &nI = getOrCreateInterval(NewVReg); 1698 nI.markNotSpillable(); 1699 1700 // Rewrite register operands to use the new vreg. 1701 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(), 1702 E = Indices.end(); I != E; ++I) { 1703 MI->getOperand(*I).setReg(NewVReg); 1704 1705 if (MI->getOperand(*I).isUse()) 1706 MI->getOperand(*I).setIsKill(true); 1707 } 1708 1709 // Fill in the new live interval. 
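      // Each instruction's SlotIndex carries load / use / def / store
      // sub-slots. A use of the spilled value is modeled as live from the
      // load slot up to the use slot (the spiller will insert the reload in
      // front of MI), and a def as live from the def slot up to the store
      // slot (the spill store goes after MI).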
1710 SlotIndex index = getInstructionIndex(MI); 1711 if (HasUse) { 1712 LiveRange LR(index.getLoadIndex(), index.getUseIndex(), 1713 nI.getNextValue(SlotIndex(), 0, false, 1714 getVNInfoAllocator())); 1715 DEBUG(dbgs() << " +" << LR); 1716 nI.addRange(LR); 1717 vrm.addRestorePoint(NewVReg, MI); 1718 } 1719 if (HasDef) { 1720 LiveRange LR(index.getDefIndex(), index.getStoreIndex(), 1721 nI.getNextValue(SlotIndex(), 0, false, 1722 getVNInfoAllocator())); 1723 DEBUG(dbgs() << " +" << LR); 1724 nI.addRange(LR); 1725 vrm.addSpillPoint(NewVReg, true, MI); 1726 } 1727 1728 added.push_back(&nI); 1729 1730 DEBUG({ 1731 dbgs() << "\t\t\t\tadded new interval: "; 1732 nI.dump(); 1733 dbgs() << '\n'; 1734 }); 1735 } 1736 1737 1738 RI = mri_->reg_begin(li.reg); 1739 } 1740 1741 return added; 1742} 1743 1744std::vector<LiveInterval*> LiveIntervals:: 1745addIntervalsForSpills(const LiveInterval &li, 1746 SmallVectorImpl<LiveInterval*> &SpillIs, 1747 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) { 1748 1749 if (EnableFastSpilling) 1750 return addIntervalsForSpillsFast(li, loopInfo, vrm); 1751 1752 assert(li.isSpillable() && "attempt to spill already spilled interval!"); 1753 1754 DEBUG({ 1755 dbgs() << "\t\t\t\tadding intervals for spills for interval: "; 1756 li.print(dbgs(), tri_); 1757 dbgs() << '\n'; 1758 }); 1759 1760 // Each bit specify whether a spill is required in the MBB. 1761 BitVector SpillMBBs(mf_->getNumBlockIDs()); 1762 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes; 1763 BitVector RestoreMBBs(mf_->getNumBlockIDs()); 1764 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes; 1765 DenseMap<unsigned,unsigned> MBBVRegsMap; 1766 std::vector<LiveInterval*> NewLIs; 1767 const TargetRegisterClass* rc = mri_->getRegClass(li.reg); 1768 1769 unsigned NumValNums = li.getNumValNums(); 1770 SmallVector<MachineInstr*, 4> ReMatDefs; 1771 ReMatDefs.resize(NumValNums, NULL); 1772 SmallVector<MachineInstr*, 4> ReMatOrigDefs; 1773 ReMatOrigDefs.resize(NumValNums, NULL); 1774 SmallVector<int, 4> ReMatIds; 1775 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT); 1776 BitVector ReMatDelete(NumValNums); 1777 unsigned Slot = VirtRegMap::MAX_STACK_SLOT; 1778 1779 // Spilling a split live interval. It cannot be split any further. Also, 1780 // it's also guaranteed to be a single val# / range interval. 1781 if (vrm.getPreSplitReg(li.reg)) { 1782 vrm.setIsSplitFromReg(li.reg, 0); 1783 // Unset the split kill marker on the last use. 1784 SlotIndex KillIdx = vrm.getKillPoint(li.reg); 1785 if (KillIdx != SlotIndex()) { 1786 MachineInstr *KillMI = getInstructionFromIndex(KillIdx); 1787 assert(KillMI && "Last use disappeared?"); 1788 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true); 1789 assert(KillOp != -1 && "Last use disappeared?"); 1790 KillMI->getOperand(KillOp).setIsKill(false); 1791 } 1792 vrm.removeKillPoint(li.reg); 1793 bool DefIsReMat = vrm.isReMaterialized(li.reg); 1794 Slot = vrm.getStackSlot(li.reg); 1795 assert(Slot != VirtRegMap::MAX_STACK_SLOT); 1796 MachineInstr *ReMatDefMI = DefIsReMat ? 
1797 vrm.getReMaterializedMI(li.reg) : NULL; 1798 int LdSlot = 0; 1799 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot); 1800 bool isLoad = isLoadSS || 1801 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad())); 1802 bool IsFirstRange = true; 1803 for (LiveInterval::Ranges::const_iterator 1804 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) { 1805 // If this is a split live interval with multiple ranges, it means there 1806 // are two-address instructions that re-defined the value. Only the 1807 // first def can be rematerialized! 1808 if (IsFirstRange) { 1809 // Note ReMatOrigDefMI has already been deleted. 1810 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI, 1811 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat, 1812 false, vrm, rc, ReMatIds, loopInfo, 1813 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes, 1814 MBBVRegsMap, NewLIs); 1815 } else { 1816 rewriteInstructionsForSpills(li, false, I, NULL, 0, 1817 Slot, 0, false, false, false, 1818 false, vrm, rc, ReMatIds, loopInfo, 1819 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes, 1820 MBBVRegsMap, NewLIs); 1821 } 1822 IsFirstRange = false; 1823 } 1824 1825 handleSpilledImpDefs(li, vrm, rc, NewLIs); 1826 normalizeSpillWeights(NewLIs); 1827 return NewLIs; 1828 } 1829 1830 bool TrySplit = !intervalIsInOneMBB(li); 1831 if (TrySplit) 1832 ++numSplits; 1833 bool NeedStackSlot = false; 1834 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end(); 1835 i != e; ++i) { 1836 const VNInfo *VNI = *i; 1837 unsigned VN = VNI->id; 1838 if (VNI->isUnused()) 1839 continue; // Dead val#. 1840 // Is the def for the val# rematerializable? 1841 MachineInstr *ReMatDefMI = VNI->isDefAccurate() 1842 ? getInstructionFromIndex(VNI->def) : 0; 1843 bool dummy; 1844 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) { 1845 // Remember how to remat the def of this val#. 1846 ReMatOrigDefs[VN] = ReMatDefMI; 1847 // Original def may be modified so we have to make a copy here. 1848 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI); 1849 CloneMIs.push_back(Clone); 1850 ReMatDefs[VN] = Clone; 1851 1852 bool CanDelete = true; 1853 if (VNI->hasPHIKill()) { 1854 // A kill is a phi node, not all of its uses can be rematerialized. 1855 // It must not be deleted. 1856 CanDelete = false; 1857 // Need a stack slot if there is any live range where uses cannot be 1858 // rematerialized. 1859 NeedStackSlot = true; 1860 } 1861 if (CanDelete) 1862 ReMatDelete.set(VN); 1863 } else { 1864 // Need a stack slot if there is any live range where uses cannot be 1865 // rematerialized. 1866 NeedStackSlot = true; 1867 } 1868 } 1869 1870 // One stack slot per live interval. 1871 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) { 1872 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT) 1873 Slot = vrm.assignVirt2StackSlot(li.reg); 1874 1875 // This case only occurs when the prealloc splitter has already assigned 1876 // a stack slot to this vreg. 1877 else 1878 Slot = vrm.getStackSlot(li.reg); 1879 } 1880 1881 // Create new intervals and rewrite defs and uses. 
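  // For each live range, look up the rematerialization state recorded above
  // for its value number (the cloned def in ReMatDefs, the original def in
  // ReMatOrigDefs) and let rewriteInstructionsForSpills rewrite that range's
  // defs and uses, filling in SpillIdxes / RestoreIdxes and NewLIs as it goes.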
1882 for (LiveInterval::Ranges::const_iterator 1883 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) { 1884 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id]; 1885 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id]; 1886 bool DefIsReMat = ReMatDefMI != NULL; 1887 bool CanDelete = ReMatDelete[I->valno->id]; 1888 int LdSlot = 0; 1889 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot); 1890 bool isLoad = isLoadSS || 1891 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad()); 1892 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI, 1893 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat, 1894 CanDelete, vrm, rc, ReMatIds, loopInfo, 1895 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes, 1896 MBBVRegsMap, NewLIs); 1897 } 1898 1899 // Insert spills / restores if we are splitting. 1900 if (!TrySplit) { 1901 handleSpilledImpDefs(li, vrm, rc, NewLIs); 1902 normalizeSpillWeights(NewLIs); 1903 return NewLIs; 1904 } 1905 1906 SmallPtrSet<LiveInterval*, 4> AddedKill; 1907 SmallVector<unsigned, 2> Ops; 1908 if (NeedStackSlot) { 1909 int Id = SpillMBBs.find_first(); 1910 while (Id != -1) { 1911 std::vector<SRInfo> &spills = SpillIdxes[Id]; 1912 for (unsigned i = 0, e = spills.size(); i != e; ++i) { 1913 SlotIndex index = spills[i].index; 1914 unsigned VReg = spills[i].vreg; 1915 LiveInterval &nI = getOrCreateInterval(VReg); 1916 bool isReMat = vrm.isReMaterialized(VReg); 1917 MachineInstr *MI = getInstructionFromIndex(index); 1918 bool CanFold = false; 1919 bool FoundUse = false; 1920 Ops.clear(); 1921 if (spills[i].canFold) { 1922 CanFold = true; 1923 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) { 1924 MachineOperand &MO = MI->getOperand(j); 1925 if (!MO.isReg() || MO.getReg() != VReg) 1926 continue; 1927 1928 Ops.push_back(j); 1929 if (MO.isDef()) 1930 continue; 1931 if (isReMat || 1932 (!FoundUse && !alsoFoldARestore(Id, index, VReg, 1933 RestoreMBBs, RestoreIdxes))) { 1934 // MI has two-address uses of the same register. If the use 1935 // isn't the first and only use in the BB, then we can't fold 1936 // it. FIXME: Move this to rewriteInstructionsForSpills. 1937 CanFold = false; 1938 break; 1939 } 1940 FoundUse = true; 1941 } 1942 } 1943 // Fold the store into the def if possible. 1944 bool Folded = false; 1945 if (CanFold && !Ops.empty()) { 1946 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){ 1947 Folded = true; 1948 if (FoundUse) { 1949 // Also folded uses, do not issue a load. 1950 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes); 1951 nI.removeRange(index.getLoadIndex(), index.getDefIndex()); 1952 } 1953 nI.removeRange(index.getDefIndex(), index.getStoreIndex()); 1954 } 1955 } 1956 1957 // Otherwise tell the spiller to issue a spill. 1958 if (!Folded) { 1959 LiveRange *LR = &nI.ranges[nI.ranges.size()-1]; 1960 bool isKill = LR->end == index.getStoreIndex(); 1961 if (!MI->registerDefIsDead(nI.reg)) 1962 // No need to spill a dead def. 
1963 vrm.addSpillPoint(VReg, isKill, MI); 1964 if (isKill) 1965 AddedKill.insert(&nI); 1966 } 1967 } 1968 Id = SpillMBBs.find_next(Id); 1969 } 1970 } 1971 1972 int Id = RestoreMBBs.find_first(); 1973 while (Id != -1) { 1974 std::vector<SRInfo> &restores = RestoreIdxes[Id]; 1975 for (unsigned i = 0, e = restores.size(); i != e; ++i) { 1976 SlotIndex index = restores[i].index; 1977 if (index == SlotIndex()) 1978 continue; 1979 unsigned VReg = restores[i].vreg; 1980 LiveInterval &nI = getOrCreateInterval(VReg); 1981 bool isReMat = vrm.isReMaterialized(VReg); 1982 MachineInstr *MI = getInstructionFromIndex(index); 1983 bool CanFold = false; 1984 Ops.clear(); 1985 if (restores[i].canFold) { 1986 CanFold = true; 1987 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) { 1988 MachineOperand &MO = MI->getOperand(j); 1989 if (!MO.isReg() || MO.getReg() != VReg) 1990 continue; 1991 1992 if (MO.isDef()) { 1993 // If this restore were to be folded, it would have been folded 1994 // already. 1995 CanFold = false; 1996 break; 1997 } 1998 Ops.push_back(j); 1999 } 2000 } 2001 2002 // Fold the load into the use if possible. 2003 bool Folded = false; 2004 if (CanFold && !Ops.empty()) { 2005 if (!isReMat) 2006 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg); 2007 else { 2008 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg); 2009 int LdSlot = 0; 2010 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot); 2011 // If the rematerializable def is a load, also try to fold it. 2012 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad()) 2013 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index, 2014 Ops, isLoadSS, LdSlot, VReg); 2015 if (!Folded) { 2016 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI); 2017 if (ImpUse) { 2018 // Re-matting an instruction with virtual register use. Add the 2019 // register as an implicit use on the use MI and mark the register 2020 // interval as unspillable. 2021 LiveInterval &ImpLi = getInterval(ImpUse); 2022 ImpLi.markNotSpillable(); 2023 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true)); 2024 } 2025 } 2026 } 2027 } 2028 // If folding is not possible / failed, then tell the spiller to issue a 2029 // load / rematerialization for us. 2030 if (Folded) 2031 nI.removeRange(index.getLoadIndex(), index.getDefIndex()); 2032 else 2033 vrm.addRestorePoint(VReg, MI); 2034 } 2035 Id = RestoreMBBs.find_next(Id); 2036 } 2037 2038 // Finalize intervals: add kills, finalize spill weights, and filter out 2039 // dead intervals. 2040 std::vector<LiveInterval*> RetNewLIs; 2041 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) { 2042 LiveInterval *LI = NewLIs[i]; 2043 if (!LI->empty()) { 2044 LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI); 2045 if (!AddedKill.count(LI)) { 2046 LiveRange *LR = &LI->ranges[LI->ranges.size()-1]; 2047 SlotIndex LastUseIdx = LR->end.getBaseIndex(); 2048 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx); 2049 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false); 2050 assert(UseIdx != -1); 2051 if (!LastUse->isRegTiedToDefOperand(UseIdx)) { 2052 LastUse->getOperand(UseIdx).setIsKill(); 2053 vrm.addKillPoint(LI->reg, LastUseIdx); 2054 } 2055 } 2056 RetNewLIs.push_back(LI); 2057 } 2058 } 2059 2060 handleSpilledImpDefs(li, vrm, rc, RetNewLIs); 2061 normalizeSpillWeights(RetNewLIs); 2062 return RetNewLIs; 2063} 2064 2065/// hasAllocatableSuperReg - Return true if the specified physical register has 2066/// any super register that's allocatable. 
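/// For example (illustrative, X86): hasAllocatableSuperReg(X86::AX) returns
/// true when EAX or RAX is allocatable and currently has a live interval.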
2067 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2068   for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2069     if (allocatableRegs_[*AS] && hasInterval(*AS))
2070       return true;
2071   return false;
2072 }
2073 
2074 /// getRepresentativeReg - Find the largest super register of the specified
2075 /// physical register.
2076 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2077   // Find the largest super-register that is allocatable.
2078   unsigned BestReg = Reg;
2079   for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2080     unsigned SuperReg = *AS;
2081     if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2082       BestReg = SuperReg;
2083       break;
2084     }
2085   }
2086   return BestReg;
2087 }
2088 
2089 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2090 /// specified interval that conflict with the specified physical register.
2091 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2092                                                    unsigned PhysReg) const {
2093   unsigned NumConflicts = 0;
2094   const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2095   for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2096          E = mri_->reg_end(); I != E; ++I) {
2097     MachineOperand &O = I.getOperand();
2098     MachineInstr *MI = O.getParent();
2099     if (MI->isDebugValue())
2100       continue;
2101     SlotIndex Index = getInstructionIndex(MI);
2102     if (pli.liveAt(Index))
2103       ++NumConflicts;
2104   }
2105   return NumConflicts;
2106 }
2107 
2108 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2109 /// around all defs and uses of the specified interval. Return true if it
2110 /// was able to cut its interval.
2111 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2112                                                   unsigned PhysReg, VirtRegMap &vrm) {
2113   unsigned SpillReg = getRepresentativeReg(PhysReg);
2114 
2115   for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2116     // Any register that aliases PhysReg must be the chosen representative
2117     // super-register itself, one of its sub-registers, or a register we can
2118     // ignore here (unallocatable or without a live interval). Assert since
2118     // we can't handle anything else yet.
2119 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) || 2120 tri_->isSuperRegister(*AS, SpillReg)); 2121 2122 bool Cut = false; 2123 SmallVector<unsigned, 4> PRegs; 2124 if (hasInterval(SpillReg)) 2125 PRegs.push_back(SpillReg); 2126 else { 2127 SmallSet<unsigned, 4> Added; 2128 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) 2129 if (Added.insert(*AS) && hasInterval(*AS)) { 2130 PRegs.push_back(*AS); 2131 for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS) 2132 Added.insert(*ASS); 2133 } 2134 } 2135 2136 SmallPtrSet<MachineInstr*, 8> SeenMIs; 2137 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg), 2138 E = mri_->reg_end(); I != E; ++I) { 2139 MachineOperand &O = I.getOperand(); 2140 MachineInstr *MI = O.getParent(); 2141 if (MI->isDebugValue() || SeenMIs.count(MI)) 2142 continue; 2143 SeenMIs.insert(MI); 2144 SlotIndex Index = getInstructionIndex(MI); 2145 for (unsigned i = 0, e = PRegs.size(); i != e; ++i) { 2146 unsigned PReg = PRegs[i]; 2147 LiveInterval &pli = getInterval(PReg); 2148 if (!pli.liveAt(Index)) 2149 continue; 2150 vrm.addEmergencySpill(PReg, MI); 2151 SlotIndex StartIdx = Index.getLoadIndex(); 2152 SlotIndex EndIdx = Index.getNextIndex().getBaseIndex(); 2153 if (pli.isInOneLiveRange(StartIdx, EndIdx)) { 2154 pli.removeRange(StartIdx, EndIdx); 2155 Cut = true; 2156 } else { 2157 std::string msg; 2158 raw_string_ostream Msg(msg); 2159 Msg << "Ran out of registers during register allocation!"; 2160 if (MI->isInlineAsm()) { 2161 Msg << "\nPlease check your inline asm statement for invalid " 2162 << "constraints:\n"; 2163 MI->print(Msg, tm_); 2164 } 2165 report_fatal_error(Msg.str()); 2166 } 2167 for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) { 2168 if (!hasInterval(*AS)) 2169 continue; 2170 LiveInterval &spli = getInterval(*AS); 2171 if (spli.liveAt(Index)) 2172 spli.removeRange(Index.getLoadIndex(), 2173 Index.getNextIndex().getBaseIndex()); 2174 } 2175 } 2176 } 2177 return Cut; 2178} 2179 2180LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg, 2181 MachineInstr* startInst) { 2182 LiveInterval& Interval = getOrCreateInterval(reg); 2183 VNInfo* VN = Interval.getNextValue( 2184 SlotIndex(getInstructionIndex(startInst).getDefIndex()), 2185 startInst, true, getVNInfoAllocator()); 2186 VN->setHasPHIKill(true); 2187 VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent())); 2188 LiveRange LR( 2189 SlotIndex(getInstructionIndex(startInst).getDefIndex()), 2190 getMBBEndIdx(startInst->getParent()), VN); 2191 Interval.addRange(LR); 2192 2193 return LR; 2194} 2195 2196
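// A minimal usage sketch for addLiveRangeToEndOfBlock (illustration only;
// CopyMI and DstReg are placeholder names for a client's newly inserted
// instruction and the virtual register it defines):
//
//   LiveIntervals &LIS = getAnalysis<LiveIntervals>();
//   LIS.InsertMachineInstrInMaps(CopyMI);
//   LiveRange LR = LIS.addLiveRangeToEndOfBlock(DstReg, CopyMI);
//   // LR now extends from CopyMI's def slot to the end index of its MBB.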