LiveIntervalAnalysis.cpp revision 04c528a0c86ddf3d6a70681f72e1b2ec07b0b53a
1//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file implements the LiveInterval analysis pass which is used 11// by the Linear Scan Register allocator. This pass linearizes the 12// basic blocks of the function in DFS order and uses the 13// LiveVariables pass to conservatively compute live intervals for 14// each virtual and physical register. 15// 16//===----------------------------------------------------------------------===// 17 18#define DEBUG_TYPE "liveintervals" 19#include "llvm/CodeGen/LiveIntervalAnalysis.h" 20#include "VirtRegMap.h" 21#include "llvm/Value.h" 22#include "llvm/Analysis/AliasAnalysis.h" 23#include "llvm/CodeGen/LiveVariables.h" 24#include "llvm/CodeGen/MachineFrameInfo.h" 25#include "llvm/CodeGen/MachineInstr.h" 26#include "llvm/CodeGen/MachineInstrBuilder.h" 27#include "llvm/CodeGen/MachineLoopInfo.h" 28#include "llvm/CodeGen/MachineMemOperand.h" 29#include "llvm/CodeGen/MachineRegisterInfo.h" 30#include "llvm/CodeGen/Passes.h" 31#include "llvm/CodeGen/ProcessImplicitDefs.h" 32#include "llvm/Target/TargetRegisterInfo.h" 33#include "llvm/Target/TargetInstrInfo.h" 34#include "llvm/Target/TargetMachine.h" 35#include "llvm/Target/TargetOptions.h" 36#include "llvm/Support/CommandLine.h" 37#include "llvm/Support/Debug.h" 38#include "llvm/Support/ErrorHandling.h" 39#include "llvm/Support/raw_ostream.h" 40#include "llvm/ADT/DepthFirstIterator.h" 41#include "llvm/ADT/SmallSet.h" 42#include "llvm/ADT/Statistic.h" 43#include "llvm/ADT/STLExtras.h" 44#include <algorithm> 45#include <limits> 46#include <cmath> 47using namespace llvm; 48 49// Hidden options for help debugging. 
// Hidden option to disable rematerialization for debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds , "Number of loads/stores folded into instructions");
STATISTIC(numSplits , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

/// getAnalysisUsage - Declare the passes this analysis requires and
/// preserves. PHI elimination is only required/preserved when the strong
/// PHI elimination pass is not in use.
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

/// releaseMemory - Delete all computed intervals, the VNInfo allocations
/// backing them, and any cloned instructions created during splitting.
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();

  // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
  VNInfoAllocator.Reset();
  // Cloned instructions were never inserted into a block, so they must be
  // deleted explicitly through the MachineFunction.
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

/// runOnMachineFunction - Register allocate the whole function
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  // Cache pointers to the function, its register/target info, and the
  // analyses this pass consumes.
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

/// printInstrs - Print every instruction of the function together with its
/// slot index (DBG_VALUEs have no index and are printed without one).
void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << "BB#" << mbbi->getNumber()
       << ":\t\t# derived from " << mbbi->getName() << "\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      if (mii->isDebugValue())
        OS << " \t" << *mii;
      else
        OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}

/// conflictsWithPhysReg - Return true if assigning physreg 'reg' could
/// conflict with this interval: the (single) range spans a block boundary
/// or terminator, or some instruction in the range has an operand that
/// maps (directly or via the VirtRegMap) to a register overlapping 'reg'.
/// Copies to/from li.reg itself are tolerated.
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();

  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    if (MI.isCopy())
      if (MI.getOperand(0).getReg() == li.reg ||
          MI.getOperand(1).getReg() == li.reg)
        continue;

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        // Map already-assigned virtual registers through the VirtRegMap;
        // unassigned vregs cannot conflict yet.
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}

/// conflictsWithAliasRef - Return true if any instruction inside li's live
/// ranges (excluding deleted instructions and already-joined copies) has an
/// explicit physical-register operand that overlaps Reg.
bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue; // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || PhysReg == Reg ||
            TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->regsOverlap(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

#ifndef NDEBUG
/// printRegName - Debug helper: print a physreg by name, a vreg as "%regN".
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    dbgs() << tri_->getName(reg);
  else
    dbgs() << "%reg" << reg;
}
#endif

/// MultipleDefsBySameMI - Return true if a later operand of MI defines the
/// same register as operand MOIdx (e.g. multiple subreg defs produced by
/// REG_SEQUENCE elimination).
static
bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
  unsigned Reg = MI.getOperand(MOIdx).getReg();
  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg())
      continue;
    if (MO.getReg() == Reg && MO.isDef()) {
      // Both defs must be distinct sub-register defs (or the second an
      // implicit def); a full double-def would be malformed.
      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
             MI.getOperand(MOIdx).getSubReg() &&
             (MO.getSubReg() || MO.isImplicit()));
      return true;
    }
  }
  return false;
}

/// isPartialRedef - Return true if the specified def at the specific index is
/// partially re-defining the specified live interval. A common case of this is
/// a definition of the sub-register.
bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                                   LiveInterval &interval) {
  // Only sub-register, non-earlyclobber defs can be partial redefs.
  if (!MO.getSubReg() || MO.isEarlyClobber())
    return false;

  SlotIndex RedefIndex = MIIdx.getDefIndex();
  // NOTE(review): getLiveRangeContaining may return null; this assumes a
  // live range always reaches the use slot of a redef — confirm invariant.
  const LiveRange *OldLR =
    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
  if (OldLR->valno->isDefAccurate()) {
    MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
    // It is a partial redef iff the previous def also defined this register.
    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
  }
  return false;
}

/// handleVirtualRegisterDef - Update 'interval' for a def of a virtual
/// register. The first def of a vreg builds its whole interval from the
/// LiveVariables kill/alive-block info; later defs (phi elimination,
/// two-address rewriting, partial redefs) split or extend existing ranges.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();

    // Make sure the first definition is not a partial redefinition. Add an
    // <imp-def> of the full register.
    if (MO.getSubReg())
      mi->addRegisterDefined(interval.reg);

    MachineInstr *CopyMI = NULL;
    if (mi->isCopyLike()) {
      CopyMI = mi;
    }

    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true,
                                          VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
           E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create interval with one of a NEW value number. Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
                                      VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.
      return;

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.

    // It may also be partial redef like this:
    // 80 %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;
      SlotIndex DefIndex = OldValNo->def.getDefIndex();

      // Delete the previous value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);

      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
      if (PartReDef && mi->isCopyLike())
        OldValNo->setCopy(&*mi);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      if (mi->isCopyLike())
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    } else {
      llvm_unreachable("Multiply defined register");
    }
  }

  DEBUG(dbgs() << '\n');
}

/// handlePhysicalRegisterDef - Update 'interval' for a def of a physical
/// register. The range ends at the kill/redef found by scanning forward in
/// the defining block, or is a one-slot "dead" range if no use is found.
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic block, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here without a killing or
  // instruction where we know it's dead is if it is live-in to the function
  // and never used. Another possible case is the implicit use of the
  // physical register has been deleted by two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}

/// handleRegisterDef - Dispatch a register def to the virtual- or
/// physical-register handler; a physreg def also defines its sub-registers.
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    if (MI->isCopyLike())
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->definesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

/// handleLiveInRegister - Create the interval portion for a register that is
/// live-in to MBB: it starts at the block entry (a PHI-def valno) and ends
/// at its first kill, first redef, or (if never touched) at 'baseIndex'
/// ('isAlias' live-ins that are never used end dead at the block start).
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      dbgs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUE's.
      return;
  }

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->definesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = baseIndex;
    }
  }

  // The live-in value is not defined by any instruction: mark it as a
  // PHI-def at the block start.
  VNInfo *vni =
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}

/// computeIntervals - computes the live intervals for virtual
/// registers.
/// for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  // blocks.
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}

/// createInterval - Allocate a new interval; physical registers get an
/// infinite spill weight so they are never chosen for spilling.
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operand!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    // NOTE(review): the break happens only in asserts builds, so in release
    // builds the loop keeps scanning and RegOp takes the LAST matching
    // operand — confirm this asymmetry is intentional.
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    // The implicitly used value must be available at every use of li that
    // this val# reaches (DBG_VALUE uses are ignored).
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
         ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}


/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successul, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI->eraseFromParent();
    MI = fmi;
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

/// intervalIsInOneMBB - Return true if every range of li is covered by the
/// same machine basic block.
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operand is
  // being remat'ed and the remat'ed instruction has li.reg as an
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
/// rewriteInstructionForSpills - Rewrite all operands of MI that refer to
/// li.reg, replacing them with NewVReg (creating it if NewVReg == 0), and
/// extend / create live ranges for the spill interval. Attempts store/load
/// folding when not splitting. Returns true if every rewritten operand of
/// this instruction could be folded. HasDef/HasUse report whether a def/use
/// of li.reg was rewritten.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        // MI is gone; stop scanning its (now freed) operands.
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    SmallVector<unsigned, 2> Ops;
    tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          // If folding erased the instruction entirely, we're done with it.
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        // Fresh interval: cover [load, def) around this instruction.
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}

/// anyKillInMBBAfterIdx - Returns true if the interval is killed anywhere in
/// MBB strictly after Idx.
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
1227namespace { 1228 struct RewriteInfo { 1229 SlotIndex Index; 1230 MachineInstr *MI; 1231 RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {} 1232 }; 1233 1234 struct RewriteInfoCompare { 1235 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const { 1236 return LHS.Index < RHS.Index; 1237 } 1238 }; 1239} 1240 1241void LiveIntervals:: 1242rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit, 1243 LiveInterval::Ranges::const_iterator &I, 1244 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI, 1245 unsigned Slot, int LdSlot, 1246 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete, 1247 VirtRegMap &vrm, 1248 const TargetRegisterClass* rc, 1249 SmallVector<int, 4> &ReMatIds, 1250 const MachineLoopInfo *loopInfo, 1251 BitVector &SpillMBBs, 1252 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes, 1253 BitVector &RestoreMBBs, 1254 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes, 1255 DenseMap<unsigned,unsigned> &MBBVRegsMap, 1256 std::vector<LiveInterval*> &NewLIs) { 1257 bool AllCanFold = true; 1258 unsigned NewVReg = 0; 1259 SlotIndex start = I->start.getBaseIndex(); 1260 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex(); 1261 1262 // First collect all the def / use in this live range that will be rewritten. 1263 // Make sure they are sorted according to instruction index. 1264 std::vector<RewriteInfo> RewriteMIs; 1265 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg), 1266 re = mri_->reg_end(); ri != re; ) { 1267 MachineInstr *MI = &*ri; 1268 MachineOperand &O = ri.getOperand(); 1269 ++ri; 1270 if (MI->isDebugValue()) { 1271 // Modify DBG_VALUE now that the value is in a spill slot. 1272 if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) { 1273 uint64_t Offset = MI->getOperand(1).getImm(); 1274 const MDNode *MDPtr = MI->getOperand(2).getMetadata(); 1275 DebugLoc DL = MI->getDebugLoc(); 1276 int FI = isLoadSS ? 
LdSlot : (int)Slot; 1277 if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI, 1278 Offset, MDPtr, DL)) { 1279 DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI); 1280 ReplaceMachineInstrInMaps(MI, NewDV); 1281 MachineBasicBlock *MBB = MI->getParent(); 1282 MBB->insert(MBB->erase(MI), NewDV); 1283 continue; 1284 } 1285 } 1286 1287 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI); 1288 RemoveMachineInstrFromMaps(MI); 1289 vrm.RemoveMachineInstrFromMaps(MI); 1290 MI->eraseFromParent(); 1291 continue; 1292 } 1293 assert(!(O.isImplicit() && O.isUse()) && 1294 "Spilling register that's used as implicit use?"); 1295 SlotIndex index = getInstructionIndex(MI); 1296 if (index < start || index >= end) 1297 continue; 1298 1299 if (O.isUndef()) 1300 // Must be defined by an implicit def. It should not be spilled. Note, 1301 // this is for correctness reason. e.g. 1302 // 8 %reg1024<def> = IMPLICIT_DEF 1303 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2 1304 // The live range [12, 14) are not part of the r1024 live interval since 1305 // it's defined by an implicit def. It will not conflicts with live 1306 // interval of r1025. Now suppose both registers are spilled, you can 1307 // easily see a situation where both registers are reloaded before 1308 // the INSERT_SUBREG and both target registers that would overlap. 1309 continue; 1310 RewriteMIs.push_back(RewriteInfo(index, MI)); 1311 } 1312 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare()); 1313 1314 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0; 1315 // Now rewrite the defs and uses. 1316 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) { 1317 RewriteInfo &rwi = RewriteMIs[i]; 1318 ++i; 1319 SlotIndex index = rwi.Index; 1320 MachineInstr *MI = rwi.MI; 1321 // If MI def and/or use the same register multiple times, then there 1322 // are multiple entries. 
1323 while (i != e && RewriteMIs[i].MI == MI) { 1324 assert(RewriteMIs[i].Index == index); 1325 ++i; 1326 } 1327 MachineBasicBlock *MBB = MI->getParent(); 1328 1329 if (ImpUse && MI != ReMatDefMI) { 1330 // Re-matting an instruction with virtual register use. Prevent interval 1331 // from being spilled. 1332 getInterval(ImpUse).markNotSpillable(); 1333 } 1334 1335 unsigned MBBId = MBB->getNumber(); 1336 unsigned ThisVReg = 0; 1337 if (TrySplit) { 1338 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId); 1339 if (NVI != MBBVRegsMap.end()) { 1340 ThisVReg = NVI->second; 1341 // One common case: 1342 // x = use 1343 // ... 1344 // ... 1345 // def = ... 1346 // = use 1347 // It's better to start a new interval to avoid artifically 1348 // extend the new interval. 1349 if (MI->readsWritesVirtualRegister(li.reg) == 1350 std::make_pair(false,true)) { 1351 MBBVRegsMap.erase(MBB->getNumber()); 1352 ThisVReg = 0; 1353 } 1354 } 1355 } 1356 1357 bool IsNew = ThisVReg == 0; 1358 if (IsNew) { 1359 // This ends the previous live interval. If all of its def / use 1360 // can be folded, give it a low spill weight. 1361 if (NewVReg && TrySplit && AllCanFold) { 1362 LiveInterval &nI = getOrCreateInterval(NewVReg); 1363 nI.weight /= 10.0F; 1364 } 1365 AllCanFold = true; 1366 } 1367 NewVReg = ThisVReg; 1368 1369 bool HasDef = false; 1370 bool HasUse = false; 1371 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit, 1372 index, end, MI, ReMatOrigDefMI, ReMatDefMI, 1373 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat, 1374 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg, 1375 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs); 1376 if (!HasDef && !HasUse) 1377 continue; 1378 1379 AllCanFold &= CanFold; 1380 1381 // Update weight of spill interval. 1382 LiveInterval &nI = getOrCreateInterval(NewVReg); 1383 if (!TrySplit) { 1384 // The spill weight is now infinity as it cannot be spilled again. 
1385 nI.markNotSpillable(); 1386 continue; 1387 } 1388 1389 // Keep track of the last def and first use in each MBB. 1390 if (HasDef) { 1391 if (MI != ReMatOrigDefMI || !CanDelete) { 1392 bool HasKill = false; 1393 if (!HasUse) 1394 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex()); 1395 else { 1396 // If this is a two-address code, then this index starts a new VNInfo. 1397 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex()); 1398 if (VNI) 1399 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex()); 1400 } 1401 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII = 1402 SpillIdxes.find(MBBId); 1403 if (!HasKill) { 1404 if (SII == SpillIdxes.end()) { 1405 std::vector<SRInfo> S; 1406 S.push_back(SRInfo(index, NewVReg, true)); 1407 SpillIdxes.insert(std::make_pair(MBBId, S)); 1408 } else if (SII->second.back().vreg != NewVReg) { 1409 SII->second.push_back(SRInfo(index, NewVReg, true)); 1410 } else if (index > SII->second.back().index) { 1411 // If there is an earlier def and this is a two-address 1412 // instruction, then it's not possible to fold the store (which 1413 // would also fold the load). 1414 SRInfo &Info = SII->second.back(); 1415 Info.index = index; 1416 Info.canFold = !HasUse; 1417 } 1418 SpillMBBs.set(MBBId); 1419 } else if (SII != SpillIdxes.end() && 1420 SII->second.back().vreg == NewVReg && 1421 index > SII->second.back().index) { 1422 // There is an earlier def that's not killed (must be two-address). 1423 // The spill is no longer needed. 1424 SII->second.pop_back(); 1425 if (SII->second.empty()) { 1426 SpillIdxes.erase(MBBId); 1427 SpillMBBs.reset(MBBId); 1428 } 1429 } 1430 } 1431 } 1432 1433 if (HasUse) { 1434 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII = 1435 SpillIdxes.find(MBBId); 1436 if (SII != SpillIdxes.end() && 1437 SII->second.back().vreg == NewVReg && 1438 index > SII->second.back().index) 1439 // Use(s) following the last def, it's not safe to fold the spill. 
1440 SII->second.back().canFold = false; 1441 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII = 1442 RestoreIdxes.find(MBBId); 1443 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg) 1444 // If we are splitting live intervals, only fold if it's the first 1445 // use and there isn't another use later in the MBB. 1446 RII->second.back().canFold = false; 1447 else if (IsNew) { 1448 // Only need a reload if there isn't an earlier def / use. 1449 if (RII == RestoreIdxes.end()) { 1450 std::vector<SRInfo> Infos; 1451 Infos.push_back(SRInfo(index, NewVReg, true)); 1452 RestoreIdxes.insert(std::make_pair(MBBId, Infos)); 1453 } else { 1454 RII->second.push_back(SRInfo(index, NewVReg, true)); 1455 } 1456 RestoreMBBs.set(MBBId); 1457 } 1458 } 1459 1460 // Update spill weight. 1461 unsigned loopDepth = loopInfo->getLoopDepth(MBB); 1462 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth); 1463 } 1464 1465 if (NewVReg && TrySplit && AllCanFold) { 1466 // If all of its def / use can be folded, give it a low spill weight. 
1467 LiveInterval &nI = getOrCreateInterval(NewVReg); 1468 nI.weight /= 10.0F; 1469 } 1470} 1471 1472bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index, 1473 unsigned vr, BitVector &RestoreMBBs, 1474 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) { 1475 if (!RestoreMBBs[Id]) 1476 return false; 1477 std::vector<SRInfo> &Restores = RestoreIdxes[Id]; 1478 for (unsigned i = 0, e = Restores.size(); i != e; ++i) 1479 if (Restores[i].index == index && 1480 Restores[i].vreg == vr && 1481 Restores[i].canFold) 1482 return true; 1483 return false; 1484} 1485 1486void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index, 1487 unsigned vr, BitVector &RestoreMBBs, 1488 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) { 1489 if (!RestoreMBBs[Id]) 1490 return; 1491 std::vector<SRInfo> &Restores = RestoreIdxes[Id]; 1492 for (unsigned i = 0, e = Restores.size(); i != e; ++i) 1493 if (Restores[i].index == index && Restores[i].vreg) 1494 Restores[i].index = SlotIndex(); 1495} 1496 1497/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being 1498/// spilled and create empty intervals for their uses. 1499void 1500LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm, 1501 const TargetRegisterClass* rc, 1502 std::vector<LiveInterval*> &NewLIs) { 1503 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg), 1504 re = mri_->reg_end(); ri != re; ) { 1505 MachineOperand &O = ri.getOperand(); 1506 MachineInstr *MI = &*ri; 1507 ++ri; 1508 if (MI->isDebugValue()) { 1509 // Remove debug info for now. 
1510 O.setReg(0U); 1511 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI); 1512 continue; 1513 } 1514 if (O.isDef()) { 1515 assert(MI->isImplicitDef() && 1516 "Register def was not rewritten?"); 1517 RemoveMachineInstrFromMaps(MI); 1518 vrm.RemoveMachineInstrFromMaps(MI); 1519 MI->eraseFromParent(); 1520 } else { 1521 // This must be an use of an implicit_def so it's not part of the live 1522 // interval. Create a new empty live interval for it. 1523 // FIXME: Can we simply erase some of the instructions? e.g. Stores? 1524 unsigned NewVReg = mri_->createVirtualRegister(rc); 1525 vrm.grow(); 1526 vrm.setIsImplicitlyDefined(NewVReg); 1527 NewLIs.push_back(&getOrCreateInterval(NewVReg)); 1528 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { 1529 MachineOperand &MO = MI->getOperand(i); 1530 if (MO.isReg() && MO.getReg() == li.reg) { 1531 MO.setReg(NewVReg); 1532 MO.setIsUndef(); 1533 } 1534 } 1535 } 1536 } 1537} 1538 1539float 1540LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) { 1541 // Limit the loop depth ridiculousness. 1542 if (loopDepth > 200) 1543 loopDepth = 200; 1544 1545 // The loop depth is used to roughly estimate the number of times the 1546 // instruction is executed. Something like 10^d is simple, but will quickly 1547 // overflow a float. This expression behaves like 10^d for small d, but is 1548 // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of 1549 // headroom before overflow. 
1550 float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth); 1551 1552 return (isDef + isUse) * lc; 1553} 1554 1555void 1556LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) { 1557 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) 1558 normalizeSpillWeight(*NewLIs[i]); 1559} 1560 1561std::vector<LiveInterval*> LiveIntervals:: 1562addIntervalsForSpills(const LiveInterval &li, 1563 SmallVectorImpl<LiveInterval*> &SpillIs, 1564 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) { 1565 assert(li.isSpillable() && "attempt to spill already spilled interval!"); 1566 1567 DEBUG({ 1568 dbgs() << "\t\t\t\tadding intervals for spills for interval: "; 1569 li.print(dbgs(), tri_); 1570 dbgs() << '\n'; 1571 }); 1572 1573 // Each bit specify whether a spill is required in the MBB. 1574 BitVector SpillMBBs(mf_->getNumBlockIDs()); 1575 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes; 1576 BitVector RestoreMBBs(mf_->getNumBlockIDs()); 1577 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes; 1578 DenseMap<unsigned,unsigned> MBBVRegsMap; 1579 std::vector<LiveInterval*> NewLIs; 1580 const TargetRegisterClass* rc = mri_->getRegClass(li.reg); 1581 1582 unsigned NumValNums = li.getNumValNums(); 1583 SmallVector<MachineInstr*, 4> ReMatDefs; 1584 ReMatDefs.resize(NumValNums, NULL); 1585 SmallVector<MachineInstr*, 4> ReMatOrigDefs; 1586 ReMatOrigDefs.resize(NumValNums, NULL); 1587 SmallVector<int, 4> ReMatIds; 1588 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT); 1589 BitVector ReMatDelete(NumValNums); 1590 unsigned Slot = VirtRegMap::MAX_STACK_SLOT; 1591 1592 // Spilling a split live interval. It cannot be split any further. Also, 1593 // it's also guaranteed to be a single val# / range interval. 1594 if (vrm.getPreSplitReg(li.reg)) { 1595 vrm.setIsSplitFromReg(li.reg, 0); 1596 // Unset the split kill marker on the last use. 
1597 SlotIndex KillIdx = vrm.getKillPoint(li.reg); 1598 if (KillIdx != SlotIndex()) { 1599 MachineInstr *KillMI = getInstructionFromIndex(KillIdx); 1600 assert(KillMI && "Last use disappeared?"); 1601 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true); 1602 assert(KillOp != -1 && "Last use disappeared?"); 1603 KillMI->getOperand(KillOp).setIsKill(false); 1604 } 1605 vrm.removeKillPoint(li.reg); 1606 bool DefIsReMat = vrm.isReMaterialized(li.reg); 1607 Slot = vrm.getStackSlot(li.reg); 1608 assert(Slot != VirtRegMap::MAX_STACK_SLOT); 1609 MachineInstr *ReMatDefMI = DefIsReMat ? 1610 vrm.getReMaterializedMI(li.reg) : NULL; 1611 int LdSlot = 0; 1612 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot); 1613 bool isLoad = isLoadSS || 1614 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad())); 1615 bool IsFirstRange = true; 1616 for (LiveInterval::Ranges::const_iterator 1617 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) { 1618 // If this is a split live interval with multiple ranges, it means there 1619 // are two-address instructions that re-defined the value. Only the 1620 // first def can be rematerialized! 1621 if (IsFirstRange) { 1622 // Note ReMatOrigDefMI has already been deleted. 
1623 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI, 1624 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat, 1625 false, vrm, rc, ReMatIds, loopInfo, 1626 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes, 1627 MBBVRegsMap, NewLIs); 1628 } else { 1629 rewriteInstructionsForSpills(li, false, I, NULL, 0, 1630 Slot, 0, false, false, false, 1631 false, vrm, rc, ReMatIds, loopInfo, 1632 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes, 1633 MBBVRegsMap, NewLIs); 1634 } 1635 IsFirstRange = false; 1636 } 1637 1638 handleSpilledImpDefs(li, vrm, rc, NewLIs); 1639 normalizeSpillWeights(NewLIs); 1640 return NewLIs; 1641 } 1642 1643 bool TrySplit = !intervalIsInOneMBB(li); 1644 if (TrySplit) 1645 ++numSplits; 1646 bool NeedStackSlot = false; 1647 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end(); 1648 i != e; ++i) { 1649 const VNInfo *VNI = *i; 1650 unsigned VN = VNI->id; 1651 if (VNI->isUnused()) 1652 continue; // Dead val#. 1653 // Is the def for the val# rematerializable? 1654 MachineInstr *ReMatDefMI = VNI->isDefAccurate() 1655 ? getInstructionFromIndex(VNI->def) : 0; 1656 bool dummy; 1657 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) { 1658 // Remember how to remat the def of this val#. 1659 ReMatOrigDefs[VN] = ReMatDefMI; 1660 // Original def may be modified so we have to make a copy here. 1661 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI); 1662 CloneMIs.push_back(Clone); 1663 ReMatDefs[VN] = Clone; 1664 1665 bool CanDelete = true; 1666 if (VNI->hasPHIKill()) { 1667 // A kill is a phi node, not all of its uses can be rematerialized. 1668 // It must not be deleted. 1669 CanDelete = false; 1670 // Need a stack slot if there is any live range where uses cannot be 1671 // rematerialized. 1672 NeedStackSlot = true; 1673 } 1674 if (CanDelete) 1675 ReMatDelete.set(VN); 1676 } else { 1677 // Need a stack slot if there is any live range where uses cannot be 1678 // rematerialized. 
1679 NeedStackSlot = true; 1680 } 1681 } 1682 1683 // One stack slot per live interval. 1684 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) { 1685 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT) 1686 Slot = vrm.assignVirt2StackSlot(li.reg); 1687 1688 // This case only occurs when the prealloc splitter has already assigned 1689 // a stack slot to this vreg. 1690 else 1691 Slot = vrm.getStackSlot(li.reg); 1692 } 1693 1694 // Create new intervals and rewrite defs and uses. 1695 for (LiveInterval::Ranges::const_iterator 1696 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) { 1697 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id]; 1698 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id]; 1699 bool DefIsReMat = ReMatDefMI != NULL; 1700 bool CanDelete = ReMatDelete[I->valno->id]; 1701 int LdSlot = 0; 1702 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot); 1703 bool isLoad = isLoadSS || 1704 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad()); 1705 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI, 1706 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat, 1707 CanDelete, vrm, rc, ReMatIds, loopInfo, 1708 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes, 1709 MBBVRegsMap, NewLIs); 1710 } 1711 1712 // Insert spills / restores if we are splitting. 
  // NOTE(review): this is the tail of the spill-interval creation routine; its
  // signature and the definitions of TrySplit, NewLIs, NeedStackSlot, Slot,
  // SpillMBBs/SpillIdxes and RestoreMBBs/RestoreIdxes appear above this
  // excerpt.

  // Fast path: no interval splitting was attempted, so there are no per-block
  // spill/restore points to process. Finalize the new intervals and return.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  // Intervals whose last range ends exactly at a spill store; their final use
  // is marked as a kill during finalization below.
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  // Scratch list of operand indices referencing the current vreg within one
  // instruction; reused across iterations.
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    // Visit every basic block (by number) that contains at least one spill
    // point, and try to fold the store into the defining instruction instead
    // of emitting a separate spill.
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          // Collect every operand of MI that references VReg and decide
          // whether the store can be folded into MI.
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            // A use operand: only foldable if a matching restore at this
            // index can be folded at the same time (and never for re-mat'ed
            // registers, which are reloaded by re-execution, not from the
            // stack slot).
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            // The def no longer lives in a register here; trim its range.
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          // The value is killed by the spill if its last range ends exactly
          // at the store slot of this instruction.
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

  // Second pass: visit every basic block that contains a restore point and
  // try to fold the load (or the rematerialized def) into the using
  // instruction.
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      // An invalid index marks a restore entry erased by eraseRestoreInfo
      // above (its load was folded together with the store).
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with virtual register use. Add the
              // register as an implicit use on the use MI and mark the register
              // interval as unspillable.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.markNotSpillable();
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      // Scale the accumulated weight by the approximate interval size (in
      // instruction slots) so weights are comparable across intervals.
      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        // The interval was not already terminated by a spill store; mark the
        // operand at its last use as a kill (unless it is tied to a def).
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
  return RetNewLIs;
}

/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable (and has a live interval).
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  // Super-register lists from TargetRegisterInfo are zero-terminated.
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register, i.e. a super-register with a live interval that has no
/// allocatable super-register of its own. Returns Reg itself if no such
/// super-register exists.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflicts with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  // Conflicts are measured against the representative (largest) register
  // covering PhysReg, since that is the interval actually tracked.
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  // Walk every operand referencing li.reg; each one that sits at an index
  // where the physreg is live counts as one conflict. Note: an instruction
  // with several operands on li.reg is counted once per operand.
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    // Debug values don't generate code; they can't conflict.
    if (MI->isDebugValue())
      continue;
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  // Spill the representative (largest) register so all aliases are covered.
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg, but which are not a
    // sub-register of the chosen representative super register. Assert
    // since we can't handle it yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  // The set of physical registers whose intervals must actually be cut:
  // SpillReg itself if it has an interval, otherwise its topmost
  // sub-registers that do (skipping sub-registers of ones already taken).
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  else {
    SmallSet<unsigned, 4> Added;
    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
      if (Added.insert(*AS) && hasInterval(*AS)) {
        PRegs.push_back(*AS);
        // Mark this register's own sub-registers as covered.
        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
          Added.insert(*ASS);
      }
  }

  // An instruction may reference li.reg through several operands; process
  // each instruction only once.
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue() || SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      // The physreg is live across this def/use of li.reg: request an
      // emergency spill and punch a hole in its interval around MI.
      vrm.addEmergencySpill(PReg, MI);
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        // The hole spans a live-range boundary; we cannot carve it out, so
        // register allocation has genuinely run out of registers.
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->isInlineAsm()) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        report_fatal_error(Msg.str());
      }
      // Also punch the same hole in any live sub-register intervals so they
      // stay consistent with PReg.
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
      }
    }
  }
  return Cut;
}

/// addLiveRangeToEndOfBlock - Create (or extend) the live interval of reg
/// with a range running from startInst's def slot to the end of its basic
/// block, defined by a new value number. Returns the added range by value.
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    startInst, true, getVNInfoAllocator());
  // The value is expected to live out through a PHI join.
  VN->setHasPHIKill(true);
  LiveRange LR(
     SlotIndex(getInstructionIndex(startInst).getDefIndex()),
     getMBBEndIdx(startInst->getParent()), VN);
  Interval.addRange(LR);

  return LR;
}