LiveIntervalAnalysis.cpp revision b8c57c8a979c5b5e0176b0027019f1b574b1af27
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;

// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();

  // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

/// runOnMachineFunction - Register allocate the whole function
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << "BB#" << mbbi->getNumber()
       << ":\t\t# derived from " << mbbi->getName() << "\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      if (mii->isDebugValue())
        OS << " \t" << *mii;
      else
        OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}

bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();

  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      if (SrcReg == li.reg || DstReg == li.reg)
        continue;

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}

bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || PhysReg == Reg ||
            TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->regsOverlap(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

#ifndef NDEBUG
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    dbgs() << tri_->getName(reg);
  else
    dbgs() << "%reg" << reg;
}
#endif

static
bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
  unsigned Reg = MI.getOperand(MOIdx).getReg();
  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg())
      continue;
    if (MO.getReg() == Reg && MO.isDef()) {
      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
             MI.getOperand(MOIdx).getSubReg() &&
             (MO.getSubReg() || MO.isImplicit()));
      return true;
    }
  }
  return false;
}

/// isPartialRedef - Return true if the specified def at the specific index is
/// partially re-defining the specified live interval. A common case of this is
/// a definition of the sub-register.
bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                                   LiveInterval &interval) {
  if (!MO.getSubReg() || MO.isEarlyClobber())
    return false;

  SlotIndex RedefIndex = MIIdx.getDefIndex();
  const LiveRange *OldLR =
    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
  if (OldLR->valno->isDefAccurate()) {
    MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
  }
  return false;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();

    // Make sure the first definition is not a partial redefinition. Add an
    // <imp-def> of the full register.
    if (MO.getSubReg())
      mi->addRegisterDefined(interval.reg);

    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->isCopyLike() ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg)) {
      CopyMI = mi;

      // Some of the REG_SEQUENCE lowering in TwoAddressInstrPass creates
      // implicit defs without really knowing. It shows up as INSERT_SUBREG
      // using an undefined register.
      if (mi->isInsertSubreg())
        mi->getOperand(1).setIsUndef();
    }

    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true,
                                          VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
             E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock),
                     ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create interval with one of a NEW value number. Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
                                      VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.
      return;

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.

    // It may also be a partial redef like this:
    // 80  %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;
      SlotIndex DefIndex = OldValNo->def.getDefIndex();

      // Delete the previous value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);

      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (PartReDef && (mi->isCopyLike() ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg)))
        OldValNo->setCopy(&*mi);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->isCopyLike() ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    } else {
      llvm_unreachable("Multiply defined register");
    }
  }

  DEBUG(dbgs() << '\n');
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here, without a kill or any
  // other instruction where we know it's dead, is if it is live-in to the
  // function and never used. Another possible case is that the implicit use
  // of the physical register has been deleted by the two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->isCopyLike() ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->definesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      dbgs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUE's.
      return;
  }

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->definesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}

/// computeIntervals - computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operand!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
         ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}


/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    SmallVector<unsigned, 2> Ops;
    tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
        uint64_t Offset = MI->getOperand(1).getImm();
        const MDNode *MDPtr = MI->getOperand(2).getMetadata();
        DebugLoc DL = MI->getDebugLoc();
        int FI = isLoadSS ? LdSlot : (int)Slot;
        if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
                                                           Offset, MDPtr, DL)) {
          DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
          ReplaceMachineInstrInMaps(MI, NewDV);
          MachineBasicBlock *MBB = MI->getParent();
          MBB->insert(MBB->erase(MI), NewDV);
          continue;
        }
      }

      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
      continue;
    }
    assert(!(O.isImplicit() && O.isUse()) &&
           "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Prevent interval
      // from being spilled.
      getInterval(ImpUse).markNotSpillable();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MI->readsWritesVirtualRegister(li.reg) ==
              std::make_pair(false,true)) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                               index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                               CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                               ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.markNotSpillable();
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
          // If this is a two-address instruction, then this index starts a
          // new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}

bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
                        unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
                        unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg)
      Restores[i].index = SlotIndex();
}

/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      O.setReg(0U);
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      continue;
    }
    if (O.isDef()) {
      assert(MI->isImplicitDef() &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}

float
LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
  // Limit the loop depth ridiculousness.
  if (loopDepth > 200)
    loopDepth = 200;

  // The loop depth is used to roughly estimate the number of times the
  // instruction is executed. Something like 10^d is simple, but will quickly
  // overflow a float. This expression behaves like 10^d for small d, but is
  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
  // headroom before overflow.
  float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);

  return (isDef + isUse) * lc;
}

void
LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
    normalizeSpillWeight(*NewLIs[i]);
}

std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(dbgs(), tri_);
      dbgs() << '\n';
    });

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // One of this value's kills is a phi node, so not all of its uses
        // can be rematerialized. It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);

    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                                 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                 CanDelete, vrm, rc, ReMatIds, loopInfo,
                                 SpillMBBs, SpillIdxes, RestoreMBBs,
                                 RestoreIdxes, MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
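  // If the interval lives entirely in one block, it was not split above, so
  // there are no per-block spill / restore points to place; just return the
  // rewritten intervals.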
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
                                   Slot, VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
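      // Non-rematerializable values are reloaded from the stack slot. For a
      // rematerializable def that is itself a load (a stack-slot load, or an
      // instruction the target can fold as a load), the def itself is tried
      // as the folded memory operand instead.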
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
                                        Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with virtual register use. Add the
              // register as an implicit use on the use MI and mark the
              // register interval as unspillable.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.markNotSpillable();
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
  return RetNewLIs;
}

/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
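/// A conflict is counted once per use or def operand of the interval's
/// register whose instruction falls where the representative interval of
/// the physical register is live; debug values are skipped.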
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue())
      continue;
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                                  unsigned PhysReg,
                                                  VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If a register aliases PhysReg but is neither the chosen representative
    // super register nor one of its sub-registers, we cannot handle it yet;
    // assert so the case is caught.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  else {
    SmallSet<unsigned, 4> Added;
    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
      if (Added.insert(*AS) && hasInterval(*AS)) {
        PRegs.push_back(*AS);
        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
          Added.insert(*ASS);
      }
  }

  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue() || SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      vrm.addEmergencySpill(PReg, MI);
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->isInlineAsm()) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        report_fatal_error(Msg.str());
      }
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
      }
    }
  }
  return Cut;
}

LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    startInst, true,
    getVNInfoAllocator());
  VN->setHasPHIKill(true);
  LiveRange LR(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    getMBBEndIdx(startInst->getParent()), VN);
  Interval.addRange(LR);

  return LR;
}
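// Illustrative use of addLiveRangeToEndOfBlock (a sketch, not code from this
// file): after a pass materializes an instruction MI that defines NewReg near
// the top of a block and needs NewReg live out to the block's end, it could
// register the instruction and extend the interval roughly like this:
//
//   LIS->InsertMachineInstrInMaps(MI);          // give MI a SlotIndex
//   LIS->addLiveRangeToEndOfBlock(NewReg, MI);  // [MI's def, end of MBB)
//
// Here LIS, MI, and NewReg are placeholder names, not identifiers from this
// file.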