LiveIntervalAnalysis.cpp revision 6374c3d4d7b1fece8ed9acb590f809a0e6fb17ee
1//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file implements the LiveInterval analysis pass which is used 11// by the Linear Scan Register allocator. This pass linearizes the 12// basic blocks of the function in DFS order and uses the 13// LiveVariables pass to conservatively compute live intervals for 14// each virtual and physical register. 15// 16//===----------------------------------------------------------------------===// 17 18#define DEBUG_TYPE "liveintervals" 19#include "llvm/CodeGen/LiveIntervalAnalysis.h" 20#include "VirtRegMap.h" 21#include "llvm/Value.h" 22#include "llvm/Analysis/AliasAnalysis.h" 23#include "llvm/CodeGen/LiveVariables.h" 24#include "llvm/CodeGen/MachineFrameInfo.h" 25#include "llvm/CodeGen/MachineInstr.h" 26#include "llvm/CodeGen/MachineInstrBuilder.h" 27#include "llvm/CodeGen/MachineLoopInfo.h" 28#include "llvm/CodeGen/MachineMemOperand.h" 29#include "llvm/CodeGen/MachineRegisterInfo.h" 30#include "llvm/CodeGen/Passes.h" 31#include "llvm/CodeGen/ProcessImplicitDefs.h" 32#include "llvm/Target/TargetRegisterInfo.h" 33#include "llvm/Target/TargetInstrInfo.h" 34#include "llvm/Target/TargetMachine.h" 35#include "llvm/Target/TargetOptions.h" 36#include "llvm/Support/CommandLine.h" 37#include "llvm/Support/Debug.h" 38#include "llvm/Support/ErrorHandling.h" 39#include "llvm/Support/raw_ostream.h" 40#include "llvm/ADT/DepthFirstIterator.h" 41#include "llvm/ADT/SmallSet.h" 42#include "llvm/ADT/Statistic.h" 43#include "llvm/ADT/STLExtras.h" 44#include <algorithm> 45#include <limits> 46#include <cmath> 47using namespace llvm; 48 49// Hidden options for help debugging. 
// Hidden debugging option: globally disables rematerialization in the
// spiller hooks below (see isReMaterializable).
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
INITIALIZE_PASS(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false);

/// getAnalysisUsage - Declare the analyses this pass requires and preserves.
/// The pass does not modify the CFG; it requires LiveVariables, SlotIndexes,
/// alias analysis, and (unless StrongPHIElim is in effect) PHI elimination,
/// plus the two-address and implicit-def lowering passes having run.
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

/// releaseMemory - Drop all per-function state: the interval map, the
/// VNInfo allocator, and any cloned instructions still owned by this pass.
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();

  // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
  VNInfoAllocator.Reset();
  // Cloned instructions were created via the MachineFunction, so they must
  // be deleted through it as well.
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

/// runOnMachineFunction - Register allocate the whole function
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  // Cache pointers to the function, target hooks and required analyses;
  // every helper below reads these members rather than taking parameters.
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

/// printInstrs - Print every instruction of the function, prefixed with its
/// slot index (DBG_VALUEs have no index and are printed without one).
void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << "BB#" << mbbi->getNumber()
       << ":\t\t# derived from " << mbbi->getName() << "\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      if (mii->isDebugValue())
        OS << "    \t" << *mii;
      else
        OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}

/// conflictsWithPhysReg - Return true if assigning interval li to the
/// physical register reg could conflict with another register used or
/// defined by an instruction inside li's (single) range.  Conservatively
/// answers true for intervals with more than one range or ranges that cross
/// a basic block boundary / terminator; copies to or from li.reg are
/// exempted from the scan.
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();

  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    if (MI.isCopy())
      if (MI.getOperand(0).getReg() == li.reg ||
          MI.getOperand(1).getReg() == li.reg)
        continue;

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      // A virtual operand conflicts through its current phys assignment,
      // if it has one.
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}

/// conflictsWithAliasRef - Return true if any instruction covered by li
/// (excluding instructions in JoinedCopies) has an explicit physical
/// register operand that overlaps Reg.
bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || PhysReg == Reg ||
            TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->regsOverlap(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

#ifndef NDEBUG
// Debug helper: print a physical register by name, or a virtual one as
// "%reg<N>".
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    dbgs() << tri_->getName(reg);
  else
    dbgs() << "%reg" << reg;
}
#endif

/// MultipleDefsBySameMI - Return true if the operand at MOIdx is followed,
/// within the same instruction, by another def of the same register (e.g.
/// two sub-register defs left behind by REG_SEQUENCE elimination).
static
bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
  unsigned Reg = MI.getOperand(MOIdx).getReg();
  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg())
      continue;
    if (MO.getReg() == Reg && MO.isDef()) {
      // Multiple defs of the same vreg in one MI are only legal when they
      // target distinct sub-registers (or the later def is implicit).
      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
             MI.getOperand(MOIdx).getSubReg() &&
             (MO.getSubReg() || MO.isImplicit()));
      return true;
    }
  }
  return false;
}

/// isPartialRedef - Return true if the specified def at the specific index is
/// partially re-defining the specified live interval. A common case of this is
/// a definition of the sub-register.
bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                                   LiveInterval &interval) {
  // Only sub-register, non-early-clobber defs can be partial redefs.
  if (!MO.getSubReg() || MO.isEarlyClobber())
    return false;

  SlotIndex RedefIndex = MIIdx.getDefIndex();
  const LiveRange *OldLR =
    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
  // NOTE(review): OldLR is dereferenced without a null check; presumably the
  // caller only asks about registers live at the use slot of MIIdx — confirm.
  if (OldLR->valno->isDefAccurate()) {
    MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
    // It is a partial redef iff the previous (accurately known) defining
    // instruction also defines this register.
    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
  }
  return false;
}

/// handleVirtualRegisterDef - Build (or extend) the live interval for the
/// virtual register defined by operand MO/MOIdx of instruction mi at slot
/// MIIdx.  The first def of a vreg creates value #0 and ranges out of the
/// LiveVariables kill/alive-block info; subsequent defs are two-address
/// redefs, partial redefs, or PHI-join defs.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();

    // Make sure the first definition is not a partial redefinition. Add an
    // <imp-def> of the full register.
    if (MO.getSubReg())
      mi->addRegisterDefined(interval.reg);

    MachineInstr *CopyMI = NULL;
    if (mi->isCopyLike()) {
      CopyMI = mi;
    }

    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true,
                                          VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        // Defining instruction is also the kill: dead def, unit-length range.
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
               E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock),
                     ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create interval with one of a NEW value number.  Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
                                      VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.
      return;

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.

    // It may also be partial redef like this:
    // 80  %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      // NOTE(review): OldLR is assumed non-null here — the redefined register
      // must already be live at the use slot of the redefining instruction.
      VNInfo *OldValNo = OldLR->valno;
      SlotIndex DefIndex = OldValNo->def.getDefIndex();

      // Delete the previous value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);

      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
      if (PartReDef && mi->isCopyLike())
        OldValNo->setCopy(&*mi);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      if (mi->isCopyLike())
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    } else {
      llvm_unreachable("Multiply defined register");
    }
  }

  DEBUG(dbgs() << '\n');
}

/// handlePhysicalRegisterDef - Build the live range for a physical register
/// defined by MO at slot MIIdx.  A physreg range never leaves its defining
/// block, so this scans forward to the kill / redef, or falls through to a
/// unit-length dead range.
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic block, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (mi->isDebugValue())
      continue;
    // baseIndex tracks mi's slot; skip over slots of deleted instructions.
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here without a killing or
  // instruction where we know it's dead is if it is live-in to the function
  // and never used. Another possible case is the implicit use of the
  // physical register has been deleted by two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}

/// handleRegisterDef - Dispatch a register def to the virtual- or
/// physical-register handler; allocatable physreg defs also create ranges
/// for their sub-registers.
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    if (MI->isCopyLike())
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->definesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

/// handleLiveInRegister - Create the live range for a register that is
/// live-in to MBB, starting at the block start (MIIdx) and ending at its
/// first kill or redef in the block.  isAlias indicates the interval is for
/// a sub/alias register of the live-in, which is treated as dead when unused.
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      dbgs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUE's.
      return;
  }

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->definesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = baseIndex;
    }
  }

  // The live-in value is defined "before" the block: model it as a PHI def
  // at the block start.
  VNInfo *vni =
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}

/// computeIntervals - computes the live intervals for virtual
/// registers.
/// for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      // DBG_VALUEs have no slot index; leave MIIndex untouched.
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  // blocks.
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}

/// createInterval - Allocate a new LiveInterval for reg.  Physical registers
/// get an infinite spill weight so the allocator never tries to spill them.
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operand!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    // NOTE(review): the break exists only in debug builds; in NDEBUG builds
    // the loop keeps scanning and the last matching operand wins — the assert
    // above is what guarantees there is at most one.
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  // NOTE(review): assumes li is live at MI's index (FindLiveRangeContaining
  // result is dereferenced unchecked).
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    // The implicitly-used value must be available at every use of li that
    // this val# reaches, otherwise remat would read a stale value.
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
         ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  // Convenience overload with no spill-candidate list and a discarded
  // isLoad result.
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}


/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successul, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  // Fold either from the stack slot (spill/reload) or from the remat def.
  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI->eraseFromParent();
    MI = fmi;
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

/// intervalIsInOneMBB - Return true if every range of li is covered by a
/// single (common) basic block.
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operand is
  // being remat'ed and the remat'ed instruction has li.reg as an
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
/// rewriteInstructionForSpills - Rewrite all operands of MI that refer to
/// li.reg to use a fresh virtual register (created here or reused via
/// NewVReg), attempting to fold the reload / remat'ed load into MI when
/// splitting is not in effect. HasDef / HasUse report what kinds of operands
/// were rewritten; the return value says whether every rewritten operand
/// could be folded.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    SmallVector<unsigned, 2> Ops;
    tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          // The fold may have deleted MI entirely; if so we are done with it.
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        // A brand new interval: the range covers just the reload + use.
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      // The def lives until the following store (spill) point.
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}

/// anyKillInMBBAfterIdx - Return true if the interval has a kill in MBB
/// strictly after Idx.
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  /// RewriteInfo - One instruction (at a given slot index) that must be
  /// rewritten while spilling a live range.
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
  };

  /// RewriteInfoCompare - Order RewriteInfo records by instruction index.
  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

/// rewriteInstructionsForSpills - Rewrite every def / use of li.reg inside
/// live range I, creating split intervals per-MBB when TrySplit is set, and
/// recording per-block spill (SpillMBBs / SpillIdxes) and restore
/// (RestoreMBBs / RestoreIdxes) points for the caller to materialize.
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    // Advance before MI may be erased below.
    ++ri;
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
        uint64_t Offset = MI->getOperand(1).getImm();
        const MDNode *MDPtr = MI->getOperand(2).getMetadata();
        DebugLoc DL = MI->getDebugLoc();
        int FI = isLoadSS ? LdSlot : (int)Slot;
        if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
                                                           Offset, MDPtr, DL)) {
          DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
          ReplaceMachineInstrInMaps(MI, NewDV);
          MachineBasicBlock *MBB = MI->getParent();
          MBB->insert(MBB->erase(MI), NewDV);
          continue;
        }
      }

      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
      continue;
    }
    assert(!(O.isImplicit() && O.isUse()) &&
           "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    // Only rewrite instructions that fall inside this live range.
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reason. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) are not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflicts with live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    MachineInstr *MI = rwi.MI;
    // If MI def and/or use the same register multiple times, then there
    // are multiple entries.
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Prevent interval
      // from being spilled.
      getInterval(ImpUse).markNotSpillable();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artifically
        // extend the new interval.
        if (MI->readsWritesVirtualRegister(li.reg) ==
            std::make_pair(false,true)) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.markNotSpillable();
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
          // If this is a two-address code, then this index starts a new
          // VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
1468 LiveInterval &nI = getOrCreateInterval(NewVReg); 1469 nI.weight /= 10.0F; 1470 } 1471} 1472 1473bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index, 1474 unsigned vr, BitVector &RestoreMBBs, 1475 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) { 1476 if (!RestoreMBBs[Id]) 1477 return false; 1478 std::vector<SRInfo> &Restores = RestoreIdxes[Id]; 1479 for (unsigned i = 0, e = Restores.size(); i != e; ++i) 1480 if (Restores[i].index == index && 1481 Restores[i].vreg == vr && 1482 Restores[i].canFold) 1483 return true; 1484 return false; 1485} 1486 1487void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index, 1488 unsigned vr, BitVector &RestoreMBBs, 1489 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) { 1490 if (!RestoreMBBs[Id]) 1491 return; 1492 std::vector<SRInfo> &Restores = RestoreIdxes[Id]; 1493 for (unsigned i = 0, e = Restores.size(); i != e; ++i) 1494 if (Restores[i].index == index && Restores[i].vreg) 1495 Restores[i].index = SlotIndex(); 1496} 1497 1498/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being 1499/// spilled and create empty intervals for their uses. 1500void 1501LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm, 1502 const TargetRegisterClass* rc, 1503 std::vector<LiveInterval*> &NewLIs) { 1504 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg), 1505 re = mri_->reg_end(); ri != re; ) { 1506 MachineOperand &O = ri.getOperand(); 1507 MachineInstr *MI = &*ri; 1508 ++ri; 1509 if (MI->isDebugValue()) { 1510 // Remove debug info for now. 
      // Detach the DBG_VALUE from the spilled register; the value is no
      // longer tracked.
      O.setReg(0U);
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      continue;
    }
    if (O.isDef()) {
      assert(MI->isImplicitDef() &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be an use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      // Rewrite every operand of MI that still refers to li.reg, marking it
      // undef since its only definition was an IMPLICIT_DEF.
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}

/// getSpillWeight - Estimate the cost of spilling at an instruction with the
/// given def/use mix at the given loop depth.
float
LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
  // Limit the loop depth ridiculousness.
  if (loopDepth > 200)
    loopDepth = 200;

  // The loop depth is used to roughly estimate the number of times the
  // instruction is executed. Something like 10^d is simple, but will quickly
  // overflow a float. This expression behaves like 10^d for small d, but is
  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
  // headroom before overflow.
1551 float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth); 1552 1553 return (isDef + isUse) * lc; 1554} 1555 1556void 1557LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) { 1558 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) 1559 normalizeSpillWeight(*NewLIs[i]); 1560} 1561 1562std::vector<LiveInterval*> LiveIntervals:: 1563addIntervalsForSpills(const LiveInterval &li, 1564 SmallVectorImpl<LiveInterval*> &SpillIs, 1565 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) { 1566 assert(li.isSpillable() && "attempt to spill already spilled interval!"); 1567 1568 DEBUG({ 1569 dbgs() << "\t\t\t\tadding intervals for spills for interval: "; 1570 li.print(dbgs(), tri_); 1571 dbgs() << '\n'; 1572 }); 1573 1574 // Each bit specify whether a spill is required in the MBB. 1575 BitVector SpillMBBs(mf_->getNumBlockIDs()); 1576 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes; 1577 BitVector RestoreMBBs(mf_->getNumBlockIDs()); 1578 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes; 1579 DenseMap<unsigned,unsigned> MBBVRegsMap; 1580 std::vector<LiveInterval*> NewLIs; 1581 const TargetRegisterClass* rc = mri_->getRegClass(li.reg); 1582 1583 unsigned NumValNums = li.getNumValNums(); 1584 SmallVector<MachineInstr*, 4> ReMatDefs; 1585 ReMatDefs.resize(NumValNums, NULL); 1586 SmallVector<MachineInstr*, 4> ReMatOrigDefs; 1587 ReMatOrigDefs.resize(NumValNums, NULL); 1588 SmallVector<int, 4> ReMatIds; 1589 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT); 1590 BitVector ReMatDelete(NumValNums); 1591 unsigned Slot = VirtRegMap::MAX_STACK_SLOT; 1592 1593 // Spilling a split live interval. It cannot be split any further. Also, 1594 // it's also guaranteed to be a single val# / range interval. 1595 if (vrm.getPreSplitReg(li.reg)) { 1596 vrm.setIsSplitFromReg(li.reg, 0); 1597 // Unset the split kill marker on the last use. 
1598 SlotIndex KillIdx = vrm.getKillPoint(li.reg); 1599 if (KillIdx != SlotIndex()) { 1600 MachineInstr *KillMI = getInstructionFromIndex(KillIdx); 1601 assert(KillMI && "Last use disappeared?"); 1602 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true); 1603 assert(KillOp != -1 && "Last use disappeared?"); 1604 KillMI->getOperand(KillOp).setIsKill(false); 1605 } 1606 vrm.removeKillPoint(li.reg); 1607 bool DefIsReMat = vrm.isReMaterialized(li.reg); 1608 Slot = vrm.getStackSlot(li.reg); 1609 assert(Slot != VirtRegMap::MAX_STACK_SLOT); 1610 MachineInstr *ReMatDefMI = DefIsReMat ? 1611 vrm.getReMaterializedMI(li.reg) : NULL; 1612 int LdSlot = 0; 1613 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot); 1614 bool isLoad = isLoadSS || 1615 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad())); 1616 bool IsFirstRange = true; 1617 for (LiveInterval::Ranges::const_iterator 1618 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) { 1619 // If this is a split live interval with multiple ranges, it means there 1620 // are two-address instructions that re-defined the value. Only the 1621 // first def can be rematerialized! 1622 if (IsFirstRange) { 1623 // Note ReMatOrigDefMI has already been deleted. 
1624 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI, 1625 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat, 1626 false, vrm, rc, ReMatIds, loopInfo, 1627 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes, 1628 MBBVRegsMap, NewLIs); 1629 } else { 1630 rewriteInstructionsForSpills(li, false, I, NULL, 0, 1631 Slot, 0, false, false, false, 1632 false, vrm, rc, ReMatIds, loopInfo, 1633 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes, 1634 MBBVRegsMap, NewLIs); 1635 } 1636 IsFirstRange = false; 1637 } 1638 1639 handleSpilledImpDefs(li, vrm, rc, NewLIs); 1640 normalizeSpillWeights(NewLIs); 1641 return NewLIs; 1642 } 1643 1644 bool TrySplit = !intervalIsInOneMBB(li); 1645 if (TrySplit) 1646 ++numSplits; 1647 bool NeedStackSlot = false; 1648 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end(); 1649 i != e; ++i) { 1650 const VNInfo *VNI = *i; 1651 unsigned VN = VNI->id; 1652 if (VNI->isUnused()) 1653 continue; // Dead val#. 1654 // Is the def for the val# rematerializable? 1655 MachineInstr *ReMatDefMI = VNI->isDefAccurate() 1656 ? getInstructionFromIndex(VNI->def) : 0; 1657 bool dummy; 1658 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) { 1659 // Remember how to remat the def of this val#. 1660 ReMatOrigDefs[VN] = ReMatDefMI; 1661 // Original def may be modified so we have to make a copy here. 1662 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI); 1663 CloneMIs.push_back(Clone); 1664 ReMatDefs[VN] = Clone; 1665 1666 bool CanDelete = true; 1667 if (VNI->hasPHIKill()) { 1668 // A kill is a phi node, not all of its uses can be rematerialized. 1669 // It must not be deleted. 1670 CanDelete = false; 1671 // Need a stack slot if there is any live range where uses cannot be 1672 // rematerialized. 1673 NeedStackSlot = true; 1674 } 1675 if (CanDelete) 1676 ReMatDelete.set(VN); 1677 } else { 1678 // Need a stack slot if there is any live range where uses cannot be 1679 // rematerialized. 
1680 NeedStackSlot = true; 1681 } 1682 } 1683 1684 // One stack slot per live interval. 1685 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) { 1686 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT) 1687 Slot = vrm.assignVirt2StackSlot(li.reg); 1688 1689 // This case only occurs when the prealloc splitter has already assigned 1690 // a stack slot to this vreg. 1691 else 1692 Slot = vrm.getStackSlot(li.reg); 1693 } 1694 1695 // Create new intervals and rewrite defs and uses. 1696 for (LiveInterval::Ranges::const_iterator 1697 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) { 1698 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id]; 1699 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id]; 1700 bool DefIsReMat = ReMatDefMI != NULL; 1701 bool CanDelete = ReMatDelete[I->valno->id]; 1702 int LdSlot = 0; 1703 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot); 1704 bool isLoad = isLoadSS || 1705 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad()); 1706 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI, 1707 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat, 1708 CanDelete, vrm, rc, ReMatIds, loopInfo, 1709 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes, 1710 MBBVRegsMap, NewLIs); 1711 } 1712 1713 // Insert spills / restores if we are splitting. 
1714 if (!TrySplit) { 1715 handleSpilledImpDefs(li, vrm, rc, NewLIs); 1716 normalizeSpillWeights(NewLIs); 1717 return NewLIs; 1718 } 1719 1720 SmallPtrSet<LiveInterval*, 4> AddedKill; 1721 SmallVector<unsigned, 2> Ops; 1722 if (NeedStackSlot) { 1723 int Id = SpillMBBs.find_first(); 1724 while (Id != -1) { 1725 std::vector<SRInfo> &spills = SpillIdxes[Id]; 1726 for (unsigned i = 0, e = spills.size(); i != e; ++i) { 1727 SlotIndex index = spills[i].index; 1728 unsigned VReg = spills[i].vreg; 1729 LiveInterval &nI = getOrCreateInterval(VReg); 1730 bool isReMat = vrm.isReMaterialized(VReg); 1731 MachineInstr *MI = getInstructionFromIndex(index); 1732 bool CanFold = false; 1733 bool FoundUse = false; 1734 Ops.clear(); 1735 if (spills[i].canFold) { 1736 CanFold = true; 1737 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) { 1738 MachineOperand &MO = MI->getOperand(j); 1739 if (!MO.isReg() || MO.getReg() != VReg) 1740 continue; 1741 1742 Ops.push_back(j); 1743 if (MO.isDef()) 1744 continue; 1745 if (isReMat || 1746 (!FoundUse && !alsoFoldARestore(Id, index, VReg, 1747 RestoreMBBs, RestoreIdxes))) { 1748 // MI has two-address uses of the same register. If the use 1749 // isn't the first and only use in the BB, then we can't fold 1750 // it. FIXME: Move this to rewriteInstructionsForSpills. 1751 CanFold = false; 1752 break; 1753 } 1754 FoundUse = true; 1755 } 1756 } 1757 // Fold the store into the def if possible. 1758 bool Folded = false; 1759 if (CanFold && !Ops.empty()) { 1760 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){ 1761 Folded = true; 1762 if (FoundUse) { 1763 // Also folded uses, do not issue a load. 1764 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes); 1765 nI.removeRange(index.getLoadIndex(), index.getDefIndex()); 1766 } 1767 nI.removeRange(index.getDefIndex(), index.getStoreIndex()); 1768 } 1769 } 1770 1771 // Otherwise tell the spiller to issue a spill. 
1772 if (!Folded) { 1773 LiveRange *LR = &nI.ranges[nI.ranges.size()-1]; 1774 bool isKill = LR->end == index.getStoreIndex(); 1775 if (!MI->registerDefIsDead(nI.reg)) 1776 // No need to spill a dead def. 1777 vrm.addSpillPoint(VReg, isKill, MI); 1778 if (isKill) 1779 AddedKill.insert(&nI); 1780 } 1781 } 1782 Id = SpillMBBs.find_next(Id); 1783 } 1784 } 1785 1786 int Id = RestoreMBBs.find_first(); 1787 while (Id != -1) { 1788 std::vector<SRInfo> &restores = RestoreIdxes[Id]; 1789 for (unsigned i = 0, e = restores.size(); i != e; ++i) { 1790 SlotIndex index = restores[i].index; 1791 if (index == SlotIndex()) 1792 continue; 1793 unsigned VReg = restores[i].vreg; 1794 LiveInterval &nI = getOrCreateInterval(VReg); 1795 bool isReMat = vrm.isReMaterialized(VReg); 1796 MachineInstr *MI = getInstructionFromIndex(index); 1797 bool CanFold = false; 1798 Ops.clear(); 1799 if (restores[i].canFold) { 1800 CanFold = true; 1801 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) { 1802 MachineOperand &MO = MI->getOperand(j); 1803 if (!MO.isReg() || MO.getReg() != VReg) 1804 continue; 1805 1806 if (MO.isDef()) { 1807 // If this restore were to be folded, it would have been folded 1808 // already. 1809 CanFold = false; 1810 break; 1811 } 1812 Ops.push_back(j); 1813 } 1814 } 1815 1816 // Fold the load into the use if possible. 1817 bool Folded = false; 1818 if (CanFold && !Ops.empty()) { 1819 if (!isReMat) 1820 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg); 1821 else { 1822 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg); 1823 int LdSlot = 0; 1824 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot); 1825 // If the rematerializable def is a load, also try to fold it. 
1826 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad()) 1827 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index, 1828 Ops, isLoadSS, LdSlot, VReg); 1829 if (!Folded) { 1830 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI); 1831 if (ImpUse) { 1832 // Re-matting an instruction with virtual register use. Add the 1833 // register as an implicit use on the use MI and mark the register 1834 // interval as unspillable. 1835 LiveInterval &ImpLi = getInterval(ImpUse); 1836 ImpLi.markNotSpillable(); 1837 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true)); 1838 } 1839 } 1840 } 1841 } 1842 // If folding is not possible / failed, then tell the spiller to issue a 1843 // load / rematerialization for us. 1844 if (Folded) 1845 nI.removeRange(index.getLoadIndex(), index.getDefIndex()); 1846 else 1847 vrm.addRestorePoint(VReg, MI); 1848 } 1849 Id = RestoreMBBs.find_next(Id); 1850 } 1851 1852 // Finalize intervals: add kills, finalize spill weights, and filter out 1853 // dead intervals. 
  // Finalize the new intervals produced by spilling: normalize the spill
  // weight by interval size, mark the final use of each interval as a kill
  // (unless a kill was already recorded in AddedKill), and drop intervals
  // that ended up empty.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      // Scale the weight down by the approximate number of instructions the
      // interval spans, so short spill intervals are preferred for reuse.
      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        // Find the instruction at the end of the last live range and flag
        // its use of this register as a kill, unless the use is tied to a
        // def operand (two-address constraint).
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
  return RetNewLIs;
}

/// hasAllocatableSuperReg - Return true if the specified physical register
/// has any super register that is both allocatable and has a live interval
/// computed for it.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register: the first super-register that has a live interval but
/// no allocatable super-register with an interval above it. Returns Reg
/// itself if no such super-register exists.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    // A super-register with an interval that itself has no allocatable
    // super-register is maximal in the register hierarchy; take it.
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  // Conflicts are measured against the representative (largest) super
  // register's interval, not PhysReg's own interval.
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  // Walk every operand (use or def) of li's virtual register; each operand
  // on an instruction where the physreg interval is live counts as one
  // conflict. Debug values are ignored. NOTE(review): an instruction with
  // several operands of li.reg is counted once per operand — presumably
  // intentional, since each operand would need its own reload/spill.
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue())
      continue;
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                                  unsigned PhysReg,
                                                  VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // Assert if there is a register that aliases PhysReg but is not a
    // sub-register of the chosen representative super register, since we
    // can't handle that case yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  // Collect the physreg intervals to cut: SpillReg itself if it has an
  // interval, otherwise its outermost sub-registers that do (skipping the
  // sub-registers of any register already taken).
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  else {
    SmallSet<unsigned, 4> Added;
    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
      if (Added.insert(*AS) && hasInterval(*AS)) {
        PRegs.push_back(*AS);
        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
          Added.insert(*ASS);
      }
  }

  // Visit each instruction that uses or defines li's virtual register once
  // (SeenMIs dedupes instructions with multiple operands of the register).
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue() || SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      // The physreg is live across this instruction: ask the spiller to
      // emergency-spill it here, then carve the instruction's slot range
      // out of the physreg interval.
      vrm.addEmergencySpill(PReg, MI);
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        // The range to remove straddles live-range boundaries, so the
        // interval cannot be cut here; allocation has genuinely failed.
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->isInlineAsm()) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        report_fatal_error(Msg.str());
      }
      // Keep sub-register intervals consistent: trim the same slot range
      // from every sub-register of PReg that is live here.
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
      }
    }
  }
  return Cut;
}

/// addLiveRangeToEndOfBlock - Add a live range for reg that starts at
/// startInst's def slot and extends to the end of startInst's basic block.
/// A new value number is created for the range and is marked as having a
/// PHI kill. Returns the LiveRange that was added.
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    startInst, true, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  LiveRange LR(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    getMBBEndIdx(startInst->getParent()), VN);
  Interval.addRange(LR);

  return LR;
}