LiveIntervalAnalysis.cpp revision 133f10f9bddc857d86fab89cfe5573f2515ae7cf
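The indices that appear throughout this listing are instruction-slot indices: every MachineInstr is given InstrSlots::NUM consecutive slots, with sub-slots for its load, use, def and store points. A minimal sketch of the scheme, written to match the companion LiveIntervalAnalysis.h of roughly this revision (treat the exact helper bodies as an assumption rather than a quotation):

// Slot layout assumed by the helpers used below (getDefIndex, getUseIndex, ...).
enum InstrSlots { LOAD = 0, USE = 1, DEF = 2, STORE = 3, NUM = 4 };
static unsigned getBaseIndex(unsigned index)  { return index - (index % InstrSlots::NUM); }
static unsigned getLoadIndex(unsigned index)  { return getBaseIndex(index) + InstrSlots::LOAD; }
static unsigned getUseIndex(unsigned index)   { return getBaseIndex(index) + InstrSlots::USE; }
static unsigned getDefIndex(unsigned index)   { return getBaseIndex(index) + InstrSlots::DEF; }
static unsigned getStoreIndex(unsigned index) { return getBaseIndex(index) + InstrSlots::STORE; }

So a register defined by the instruction at base index i and killed by the instruction at base index k is live over [i + DEF, k + USE + 1), which is the "[defSlot(def), useSlot(kill)+1)" notation used in the comments below.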
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

// Hidden options to help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
STATISTIC(numFolds, "Number of loads/stores folded into instructions");
STATISTIC(numSplits, "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  MBB2IdxMap.clear();
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  r2iMap_.clear();
  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
85 VNInfoAllocator.Reset(); 86 while (!ClonedMIs.empty()) { 87 MachineInstr *MI = ClonedMIs.back(); 88 ClonedMIs.pop_back(); 89 mf_->DeleteMachineInstr(MI); 90 } 91} 92 93void LiveIntervals::computeNumbering() { 94 Index2MiMap OldI2MI = i2miMap_; 95 std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap; 96 97 Idx2MBBMap.clear(); 98 MBB2IdxMap.clear(); 99 mi2iMap_.clear(); 100 i2miMap_.clear(); 101 102 FunctionSize = 0; 103 104 // Number MachineInstrs and MachineBasicBlocks. 105 // Initialize MBB indexes to a sentinal. 106 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U)); 107 108 unsigned MIIndex = 0; 109 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end(); 110 MBB != E; ++MBB) { 111 unsigned StartIdx = MIIndex; 112 113 // Insert an empty slot at the beginning of each block. 114 MIIndex += InstrSlots::NUM; 115 i2miMap_.push_back(0); 116 117 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); 118 I != E; ++I) { 119 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second; 120 assert(inserted && "multiple MachineInstr -> index mappings"); 121 i2miMap_.push_back(I); 122 MIIndex += InstrSlots::NUM; 123 FunctionSize++; 124 125 // Insert an empty slot after every instruction. 126 MIIndex += InstrSlots::NUM; 127 i2miMap_.push_back(0); 128 } 129 130 // Set the MBB2IdxMap entry for this MBB. 131 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1); 132 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB)); 133 } 134 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare()); 135 136 if (!OldI2MI.empty()) 137 for (iterator OI = begin(), OE = end(); OI != OE; ++OI) { 138 for (LiveInterval::iterator LI = OI->second->begin(), 139 LE = OI->second->end(); LI != LE; ++LI) { 140 141 // Remap the start index of the live range to the corresponding new 142 // number, or our best guess at what it _should_ correspond to if the 143 // original instruction has been erased. This is either the following 144 // instruction or its predecessor. 145 unsigned index = LI->start / InstrSlots::NUM; 146 unsigned offset = LI->start % InstrSlots::NUM; 147 if (offset == InstrSlots::LOAD) { 148 std::vector<IdxMBBPair>::const_iterator I = 149 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start); 150 // Take the pair containing the index 151 std::vector<IdxMBBPair>::const_iterator J = 152 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I; 153 154 LI->start = getMBBStartIdx(J->second); 155 } else { 156 LI->start = mi2iMap_[OldI2MI[index]] + offset; 157 } 158 159 // Remap the ending index in the same way that we remapped the start, 160 // except for the final step where we always map to the immediately 161 // following instruction. 162 index = (LI->end - 1) / InstrSlots::NUM; 163 offset = LI->end % InstrSlots::NUM; 164 if (offset == InstrSlots::LOAD) { 165 // VReg dies at end of block. 166 std::vector<IdxMBBPair>::const_iterator I = 167 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end); 168 --I; 169 170 LI->end = getMBBEndIdx(I->second) + 1; 171 } else { 172 unsigned idx = index; 173 while (index < OldI2MI.size() && !OldI2MI[index]) ++index; 174 175 if (index != OldI2MI.size()) 176 LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? 
offset : 0); 177 else 178 LI->end = InstrSlots::NUM * i2miMap_.size(); 179 } 180 } 181 182 for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(), 183 VNE = OI->second->vni_end(); VNI != VNE; ++VNI) { 184 VNInfo* vni = *VNI; 185 186 // Remap the VNInfo def index, which works the same as the 187 // start indices above. VN's with special sentinel defs 188 // don't need to be remapped. 189 if (vni->def != ~0U && vni->def != ~1U) { 190 unsigned index = vni->def / InstrSlots::NUM; 191 unsigned offset = vni->def % InstrSlots::NUM; 192 if (offset == InstrSlots::LOAD) { 193 std::vector<IdxMBBPair>::const_iterator I = 194 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def); 195 // Take the pair containing the index 196 std::vector<IdxMBBPair>::const_iterator J = 197 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I; 198 199 vni->def = getMBBStartIdx(J->second); 200 } else { 201 vni->def = mi2iMap_[OldI2MI[index]] + offset; 202 } 203 } 204 205 // Remap the VNInfo kill indices, which works the same as 206 // the end indices above. 207 for (size_t i = 0; i < vni->kills.size(); ++i) { 208 // PHI kills don't need to be remapped. 209 if (!vni->kills[i]) continue; 210 211 unsigned index = (vni->kills[i]-1) / InstrSlots::NUM; 212 unsigned offset = vni->kills[i] % InstrSlots::NUM; 213 if (offset == InstrSlots::STORE) { 214 std::vector<IdxMBBPair>::const_iterator I = 215 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]); 216 --I; 217 218 vni->kills[i] = getMBBEndIdx(I->second); 219 } else { 220 unsigned idx = index; 221 while (index < OldI2MI.size() && !OldI2MI[index]) ++index; 222 223 if (index != OldI2MI.size()) 224 vni->kills[i] = mi2iMap_[OldI2MI[index]] + 225 (idx == index ? offset : 0); 226 else 227 vni->kills[i] = InstrSlots::NUM * i2miMap_.size(); 228 } 229 } 230 } 231 } 232} 233 234/// runOnMachineFunction - Register allocate the whole function 235/// 236bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) { 237 mf_ = &fn; 238 mri_ = &mf_->getRegInfo(); 239 tm_ = &fn.getTarget(); 240 tri_ = tm_->getRegisterInfo(); 241 tii_ = tm_->getInstrInfo(); 242 aa_ = &getAnalysis<AliasAnalysis>(); 243 lv_ = &getAnalysis<LiveVariables>(); 244 allocatableRegs_ = tri_->getAllocatableSet(fn); 245 246 computeNumbering(); 247 computeIntervals(); 248 249 numIntervals += getNumIntervals(); 250 251 DOUT << "********** INTERVALS **********\n"; 252 for (iterator I = begin(), E = end(); I != E; ++I) { 253 I->second->print(DOUT, tri_); 254 DOUT << "\n"; 255 } 256 257 numIntervalsAfter += getNumIntervals(); 258 DEBUG(dump()); 259 return true; 260} 261 262/// print - Implement the dump method. 263void LiveIntervals::print(std::ostream &O, const Module* ) const { 264 O << "********** INTERVALS **********\n"; 265 for (const_iterator I = begin(), E = end(); I != E; ++I) { 266 I->second->print(O, tri_); 267 O << "\n"; 268 } 269 270 O << "********** MACHINEINSTRS **********\n"; 271 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end(); 272 mbbi != mbbe; ++mbbi) { 273 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n"; 274 for (MachineBasicBlock::iterator mii = mbbi->begin(), 275 mie = mbbi->end(); mii != mie; ++mii) { 276 O << getInstructionIndex(mii) << '\t' << *mii; 277 } 278 } 279} 280 281/// conflictsWithPhysRegDef - Returns true if the specified register 282/// is defined during the duration of the specified interval. 
283bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li, 284 VirtRegMap &vrm, unsigned reg) { 285 for (LiveInterval::Ranges::const_iterator 286 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) { 287 for (unsigned index = getBaseIndex(I->start), 288 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end; 289 index += InstrSlots::NUM) { 290 // skip deleted instructions 291 while (index != end && !getInstructionFromIndex(index)) 292 index += InstrSlots::NUM; 293 if (index == end) break; 294 295 MachineInstr *MI = getInstructionFromIndex(index); 296 unsigned SrcReg, DstReg; 297 if (tii_->isMoveInstr(*MI, SrcReg, DstReg)) 298 if (SrcReg == li.reg || DstReg == li.reg) 299 continue; 300 for (unsigned i = 0; i != MI->getNumOperands(); ++i) { 301 MachineOperand& mop = MI->getOperand(i); 302 if (!mop.isRegister()) 303 continue; 304 unsigned PhysReg = mop.getReg(); 305 if (PhysReg == 0 || PhysReg == li.reg) 306 continue; 307 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) { 308 if (!vrm.hasPhys(PhysReg)) 309 continue; 310 PhysReg = vrm.getPhys(PhysReg); 311 } 312 if (PhysReg && tri_->regsOverlap(PhysReg, reg)) 313 return true; 314 } 315 } 316 } 317 318 return false; 319} 320 321void LiveIntervals::printRegName(unsigned reg) const { 322 if (TargetRegisterInfo::isPhysicalRegister(reg)) 323 cerr << tri_->getName(reg); 324 else 325 cerr << "%reg" << reg; 326} 327 328void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb, 329 MachineBasicBlock::iterator mi, 330 unsigned MIIdx, MachineOperand& MO, 331 unsigned MOIdx, 332 LiveInterval &interval) { 333 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg)); 334 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg); 335 336 if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) { 337 DOUT << "is a implicit_def\n"; 338 return; 339 } 340 341 // Virtual registers may be defined multiple times (due to phi 342 // elimination and 2-addr elimination). Much of what we do only has to be 343 // done once for the vreg. We use an empty interval to detect the first 344 // time we see a vreg. 345 if (interval.empty()) { 346 // Get the Idx of the defining instructions. 347 unsigned defIndex = getDefIndex(MIIdx); 348 VNInfo *ValNo; 349 MachineInstr *CopyMI = NULL; 350 unsigned SrcReg, DstReg; 351 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG || 352 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG || 353 tii_->isMoveInstr(*mi, SrcReg, DstReg)) 354 CopyMI = mi; 355 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator); 356 357 assert(ValNo->id == 0 && "First value in interval is not 0?"); 358 359 // Loop over all of the blocks that the vreg is defined in. There are 360 // two cases we have to handle here. The most common case is a vreg 361 // whose lifetime is contained within a basic block. In this case there 362 // will be a single kill, in MBB, which comes after the definition. 363 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) { 364 // FIXME: what about dead vars? 365 unsigned killIdx; 366 if (vi.Kills[0] != mi) 367 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1; 368 else 369 killIdx = defIndex+1; 370 371 // If the kill happens after the definition, we have an intra-block 372 // live range. 
373 if (killIdx > defIndex) { 374 assert(vi.AliveBlocks.none() && 375 "Shouldn't be alive across any blocks!"); 376 LiveRange LR(defIndex, killIdx, ValNo); 377 interval.addRange(LR); 378 DOUT << " +" << LR << "\n"; 379 interval.addKill(ValNo, killIdx); 380 return; 381 } 382 } 383 384 // The other case we handle is when a virtual register lives to the end 385 // of the defining block, potentially live across some blocks, then is 386 // live into some number of blocks, but gets killed. Start by adding a 387 // range that goes from this definition to the end of the defining block. 388 LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo); 389 DOUT << " +" << NewLR; 390 interval.addRange(NewLR); 391 392 // Iterate over all of the blocks that the variable is completely 393 // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the 394 // live interval. 395 for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) { 396 if (vi.AliveBlocks[i]) { 397 LiveRange LR(getMBBStartIdx(i), 398 getMBBEndIdx(i)+1, // MBB ends at -1. 399 ValNo); 400 interval.addRange(LR); 401 DOUT << " +" << LR; 402 } 403 } 404 405 // Finally, this virtual register is live from the start of any killing 406 // block to the 'use' slot of the killing instruction. 407 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) { 408 MachineInstr *Kill = vi.Kills[i]; 409 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1; 410 LiveRange LR(getMBBStartIdx(Kill->getParent()), 411 killIdx, ValNo); 412 interval.addRange(LR); 413 interval.addKill(ValNo, killIdx); 414 DOUT << " +" << LR; 415 } 416 417 } else { 418 // If this is the second time we see a virtual register definition, it 419 // must be due to phi elimination or two addr elimination. If this is 420 // the result of two address elimination, then the vreg is one of the 421 // def-and-use register operand. 422 if (mi->isRegReDefinedByTwoAddr(interval.reg, MOIdx)) { 423 // If this is a two-address definition, then we have already processed 424 // the live range. The only problem is that we didn't realize there 425 // are actually two values in the live interval. Because of this we 426 // need to take the LiveRegion that defines this register and split it 427 // into two values. 428 assert(interval.containsOneValue()); 429 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def); 430 unsigned RedefIndex = getDefIndex(MIIdx); 431 432 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1); 433 VNInfo *OldValNo = OldLR->valno; 434 435 // Delete the initial value, which should be short and continuous, 436 // because the 2-addr copy must be in the same MBB as the redef. 437 interval.removeRange(DefIndex, RedefIndex); 438 439 // Two-address vregs should always only be redefined once. This means 440 // that at this point, there should be exactly one value number in it. 441 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!"); 442 443 // The new value number (#1) is defined by the instruction we claimed 444 // defined value #0. 445 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy, 446 VNInfoAllocator); 447 448 // Value#0 is now defined by the 2-addr instruction. 449 OldValNo->def = RedefIndex; 450 OldValNo->copy = 0; 451 452 // Add the new live interval which replaces the range for the input copy. 
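      // For instance (an illustrative sketch, not taken from this source):
      //   %reg1024 = MOV %reg1025        ; the copy, def slot D
      //   %reg1024 = ADD %reg1024, 8     ; two-address redef, def slot R
      // The range [D, R) added below carries the new value number (#1, defined
      // by the copy), while value #0 is re-pointed at the ADD from slot R on.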
453 LiveRange LR(DefIndex, RedefIndex, ValNo); 454 DOUT << " replace range with " << LR; 455 interval.addRange(LR); 456 interval.addKill(ValNo, RedefIndex); 457 458 // If this redefinition is dead, we need to add a dummy unit live 459 // range covering the def slot. 460 if (MO.isDead()) 461 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo)); 462 463 DOUT << " RESULT: "; 464 interval.print(DOUT, tri_); 465 466 } else { 467 // Otherwise, this must be because of phi elimination. If this is the 468 // first redefinition of the vreg that we have seen, go back and change 469 // the live range in the PHI block to be a different value number. 470 if (interval.containsOneValue()) { 471 assert(vi.Kills.size() == 1 && 472 "PHI elimination vreg should have one kill, the PHI itself!"); 473 474 // Remove the old range that we now know has an incorrect number. 475 VNInfo *VNI = interval.getValNumInfo(0); 476 MachineInstr *Killer = vi.Kills[0]; 477 unsigned Start = getMBBStartIdx(Killer->getParent()); 478 unsigned End = getUseIndex(getInstructionIndex(Killer))+1; 479 DOUT << " Removing [" << Start << "," << End << "] from: "; 480 interval.print(DOUT, tri_); DOUT << "\n"; 481 interval.removeRange(Start, End); 482 VNI->hasPHIKill = true; 483 DOUT << " RESULT: "; interval.print(DOUT, tri_); 484 485 // Replace the interval with one of a NEW value number. Note that this 486 // value number isn't actually defined by an instruction, weird huh? :) 487 LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator)); 488 DOUT << " replace range with " << LR; 489 interval.addRange(LR); 490 interval.addKill(LR.valno, End); 491 DOUT << " RESULT: "; interval.print(DOUT, tri_); 492 } 493 494 // In the case of PHI elimination, each variable definition is only 495 // live until the end of the block. We've already taken care of the 496 // rest of the live range. 497 unsigned defIndex = getDefIndex(MIIdx); 498 499 VNInfo *ValNo; 500 MachineInstr *CopyMI = NULL; 501 unsigned SrcReg, DstReg; 502 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG || 503 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG || 504 tii_->isMoveInstr(*mi, SrcReg, DstReg)) 505 CopyMI = mi; 506 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator); 507 508 unsigned killIndex = getMBBEndIdx(mbb) + 1; 509 LiveRange LR(defIndex, killIndex, ValNo); 510 interval.addRange(LR); 511 interval.addKill(ValNo, killIndex); 512 ValNo->hasPHIKill = true; 513 DOUT << " +" << LR; 514 } 515 } 516 517 DOUT << '\n'; 518} 519 520void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB, 521 MachineBasicBlock::iterator mi, 522 unsigned MIIdx, 523 MachineOperand& MO, 524 LiveInterval &interval, 525 MachineInstr *CopyMI) { 526 // A physical register cannot be live across basic block, so its 527 // lifetime must end somewhere in its defining basic block. 528 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg)); 529 530 unsigned baseIndex = MIIdx; 531 unsigned start = getDefIndex(baseIndex); 532 unsigned end = start; 533 534 // If it is not used after definition, it is considered dead at 535 // the instruction defining it. Hence its interval is: 536 // [defSlot(def), defSlot(def)+1) 537 if (MO.isDead()) { 538 DOUT << " dead"; 539 end = getDefIndex(start) + 1; 540 goto exit; 541 } 542 543 // If it is not dead on definition, it must be killed by a 544 // subsequent instruction. 
Hence its interval is: 545 // [defSlot(def), useSlot(kill)+1) 546 baseIndex += InstrSlots::NUM; 547 while (++mi != MBB->end()) { 548 while (baseIndex / InstrSlots::NUM < i2miMap_.size() && 549 getInstructionFromIndex(baseIndex) == 0) 550 baseIndex += InstrSlots::NUM; 551 if (mi->killsRegister(interval.reg, tri_)) { 552 DOUT << " killed"; 553 end = getUseIndex(baseIndex) + 1; 554 goto exit; 555 } else if (mi->modifiesRegister(interval.reg, tri_)) { 556 // Another instruction redefines the register before it is ever read. 557 // Then the register is essentially dead at the instruction that defines 558 // it. Hence its interval is: 559 // [defSlot(def), defSlot(def)+1) 560 DOUT << " dead"; 561 end = getDefIndex(start) + 1; 562 goto exit; 563 } 564 565 baseIndex += InstrSlots::NUM; 566 } 567 568 // The only case we should have a dead physreg here without a killing or 569 // instruction where we know it's dead is if it is live-in to the function 570 // and never used. 571 assert(!CopyMI && "physreg was not killed in defining block!"); 572 end = getDefIndex(start) + 1; // It's dead. 573 574exit: 575 assert(start < end && "did not find end of interval?"); 576 577 // Already exists? Extend old live interval. 578 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start); 579 VNInfo *ValNo = (OldLR != interval.end()) 580 ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator); 581 LiveRange LR(start, end, ValNo); 582 interval.addRange(LR); 583 interval.addKill(LR.valno, end); 584 DOUT << " +" << LR << '\n'; 585} 586 587void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB, 588 MachineBasicBlock::iterator MI, 589 unsigned MIIdx, 590 MachineOperand& MO, 591 unsigned MOIdx) { 592 if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) 593 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx, 594 getOrCreateInterval(MO.getReg())); 595 else if (allocatableRegs_[MO.getReg()]) { 596 MachineInstr *CopyMI = NULL; 597 unsigned SrcReg, DstReg; 598 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG || 599 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG || 600 tii_->isMoveInstr(*MI, SrcReg, DstReg)) 601 CopyMI = MI; 602 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO, 603 getOrCreateInterval(MO.getReg()), CopyMI); 604 // Def of a register also defines its sub-registers. 605 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS) 606 // If MI also modifies the sub-register explicitly, avoid processing it 607 // more than once. Do not pass in TRI here so it checks for exact match. 608 if (!MI->modifiesRegister(*AS)) 609 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO, 610 getOrCreateInterval(*AS), 0); 611 } 612} 613 614void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB, 615 unsigned MIIdx, 616 LiveInterval &interval, bool isAlias) { 617 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg)); 618 619 // Look for kills, if it reaches a def before it's killed, then it shouldn't 620 // be considered a livein. 621 MachineBasicBlock::iterator mi = MBB->begin(); 622 unsigned baseIndex = MIIdx; 623 unsigned start = baseIndex; 624 unsigned end = start; 625 while (mi != MBB->end()) { 626 if (mi->killsRegister(interval.reg, tri_)) { 627 DOUT << " killed"; 628 end = getUseIndex(baseIndex) + 1; 629 goto exit; 630 } else if (mi->modifiesRegister(interval.reg, tri_)) { 631 // Another instruction redefines the register before it is ever read. 632 // Then the register is essentially dead at the instruction that defines 633 // it. 
Hence its interval is: 634 // [defSlot(def), defSlot(def)+1) 635 DOUT << " dead"; 636 end = getDefIndex(start) + 1; 637 goto exit; 638 } 639 640 baseIndex += InstrSlots::NUM; 641 while (baseIndex / InstrSlots::NUM < i2miMap_.size() && 642 getInstructionFromIndex(baseIndex) == 0) 643 baseIndex += InstrSlots::NUM; 644 ++mi; 645 } 646 647exit: 648 // Live-in register might not be used at all. 649 if (end == MIIdx) { 650 if (isAlias) { 651 DOUT << " dead"; 652 end = getDefIndex(MIIdx) + 1; 653 } else { 654 DOUT << " live through"; 655 end = baseIndex; 656 } 657 } 658 659 LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator)); 660 interval.addRange(LR); 661 interval.addKill(LR.valno, end); 662 DOUT << " +" << LR << '\n'; 663} 664 665/// computeIntervals - computes the live intervals for virtual 666/// registers. for some ordering of the machine instructions [1,N] a 667/// live interval is an interval [i, j) where 1 <= i <= j < N for 668/// which a variable is live 669void LiveIntervals::computeIntervals() { 670 DOUT << "********** COMPUTING LIVE INTERVALS **********\n" 671 << "********** Function: " 672 << ((Value*)mf_->getFunction())->getName() << '\n'; 673 // Track the index of the current machine instr. 674 unsigned MIIndex = 0; 675 676 // Skip over empty initial indices. 677 while (MIIndex / InstrSlots::NUM < i2miMap_.size() && 678 getInstructionFromIndex(MIIndex) == 0) 679 MIIndex += InstrSlots::NUM; 680 681 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end(); 682 MBBI != E; ++MBBI) { 683 MachineBasicBlock *MBB = MBBI; 684 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n"; 685 686 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end(); 687 688 // Create intervals for live-ins to this BB first. 689 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(), 690 LE = MBB->livein_end(); LI != LE; ++LI) { 691 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI)); 692 // Multiple live-ins can alias the same register. 693 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS) 694 if (!hasInterval(*AS)) 695 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS), 696 true); 697 } 698 699 for (; MI != miEnd; ++MI) { 700 DOUT << MIIndex << "\t" << *MI; 701 702 // Handle defs. 703 for (int i = MI->getNumOperands() - 1; i >= 0; --i) { 704 MachineOperand &MO = MI->getOperand(i); 705 // handle register defs - build intervals 706 if (MO.isRegister() && MO.getReg() && MO.isDef()) 707 handleRegisterDef(MBB, MI, MIIndex, MO, i); 708 } 709 710 MIIndex += InstrSlots::NUM; 711 712 // Skip over empty indices. 713 while (MIIndex / InstrSlots::NUM < i2miMap_.size() && 714 getInstructionFromIndex(MIIndex) == 0) 715 MIIndex += InstrSlots::NUM; 716 } 717 } 718} 719 720bool LiveIntervals::findLiveInMBBs(const LiveRange &LR, 721 SmallVectorImpl<MachineBasicBlock*> &MBBs) const { 722 std::vector<IdxMBBPair>::const_iterator I = 723 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start); 724 725 bool ResVal = false; 726 while (I != Idx2MBBMap.end()) { 727 if (LR.end <= I->first) 728 break; 729 MBBs.push_back(I->second); 730 ResVal = true; 731 ++I; 732 } 733 return ResVal; 734} 735 736 737LiveInterval* LiveIntervals::createInterval(unsigned reg) { 738 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? 739 HUGE_VALF : 0.0F; 740 return new LiveInterval(reg, Weight); 741} 742 743/// getVNInfoSourceReg - Helper function that parses the specified VNInfo 744/// copy field and returns the source register that defines it. 
745unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const { 746 if (!VNI->copy) 747 return 0; 748 749 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) 750 return VNI->copy->getOperand(1).getReg(); 751 if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG) 752 return VNI->copy->getOperand(2).getReg(); 753 unsigned SrcReg, DstReg; 754 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg)) 755 return SrcReg; 756 assert(0 && "Unrecognized copy instruction!"); 757 return 0; 758} 759 760//===----------------------------------------------------------------------===// 761// Register allocator hooks. 762// 763 764/// getReMatImplicitUse - If the remat definition MI has one (for now, we only 765/// allow one) virtual register operand, then its uses are implicitly using 766/// the register. Returns the virtual register. 767unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li, 768 MachineInstr *MI) const { 769 unsigned RegOp = 0; 770 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { 771 MachineOperand &MO = MI->getOperand(i); 772 if (!MO.isRegister() || !MO.isUse()) 773 continue; 774 unsigned Reg = MO.getReg(); 775 if (Reg == 0 || Reg == li.reg) 776 continue; 777 // FIXME: For now, only remat MI with at most one register operand. 778 assert(!RegOp && 779 "Can't rematerialize instruction with multiple register operand!"); 780 RegOp = MO.getReg(); 781#ifndef NDEBUG 782 break; 783#endif 784 } 785 return RegOp; 786} 787 788/// isValNoAvailableAt - Return true if the val# of the specified interval 789/// which reaches the given instruction also reaches the specified use index. 790bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI, 791 unsigned UseIdx) const { 792 unsigned Index = getInstructionIndex(MI); 793 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno; 794 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx); 795 return UI != li.end() && UI->valno == ValNo; 796} 797 798/// isReMaterializable - Returns true if the definition MI of the specified 799/// val# of the specified interval is re-materializable. 800bool LiveIntervals::isReMaterializable(const LiveInterval &li, 801 const VNInfo *ValNo, MachineInstr *MI, 802 bool &isLoad) { 803 if (DisableReMat) 804 return false; 805 806 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) 807 return true; 808 809 int FrameIdx = 0; 810 if (tii_->isLoadFromStackSlot(MI, FrameIdx) && 811 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx)) 812 // FIXME: Let target specific isReallyTriviallyReMaterializable determines 813 // this but remember this is not safe to fold into a two-address 814 // instruction. 815 // This is a load from fixed stack slot. It can be rematerialized. 816 return true; 817 818 // If the target-specific rules don't identify an instruction as 819 // being trivially rematerializable, use some target-independent 820 // rules. 821 if (!MI->getDesc().isRematerializable() || 822 !tii_->isTriviallyReMaterializable(MI)) { 823 if (!EnableAggressiveRemat) 824 return false; 825 826 // If the instruction accesses memory but the memoperands have been lost, 827 // we can't analyze it. 828 const TargetInstrDesc &TID = MI->getDesc(); 829 if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty()) 830 return false; 831 832 // Avoid instructions obviously unsafe for remat. 
833 if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable()) 834 return false; 835 836 // If the instruction accesses memory and the memory could be non-constant, 837 // assume the instruction is not rematerializable. 838 for (std::list<MachineMemOperand>::const_iterator I = MI->memoperands_begin(), 839 E = MI->memoperands_end(); I != E; ++I) { 840 const MachineMemOperand &MMO = *I; 841 if (MMO.isVolatile() || MMO.isStore()) 842 return false; 843 const Value *V = MMO.getValue(); 844 if (!V) 845 return false; 846 if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) { 847 if (!PSV->isConstant(mf_->getFrameInfo())) 848 return false; 849 } else if (!aa_->pointsToConstantMemory(V)) 850 return false; 851 } 852 853 // If any of the registers accessed are non-constant, conservatively assume 854 // the instruction is not rematerializable. 855 unsigned ImpUse = 0; 856 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { 857 const MachineOperand &MO = MI->getOperand(i); 858 if (MO.isReg()) { 859 unsigned Reg = MO.getReg(); 860 if (Reg == 0) 861 continue; 862 if (TargetRegisterInfo::isPhysicalRegister(Reg)) 863 return false; 864 865 // Only allow one def, and that in the first operand. 866 if (MO.isDef() != (i == 0)) 867 return false; 868 869 // Only allow constant-valued registers. 870 bool IsLiveIn = mri_->isLiveIn(Reg); 871 MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg), 872 E = mri_->def_end(); 873 874 // For the def, it should be the only def. 875 if (MO.isDef() && (next(I) != E || IsLiveIn)) 876 return false; 877 878 if (MO.isUse()) { 879 // Only allow one use other register use, as that's all the 880 // remat mechanisms support currently. 881 if (Reg != li.reg) { 882 if (ImpUse == 0) 883 ImpUse = Reg; 884 else if (Reg != ImpUse) 885 return false; 886 } 887 // For uses, there should be only one associate def. 888 if (I != E && (next(I) != E || IsLiveIn)) 889 return false; 890 } 891 } 892 } 893 } 894 895 unsigned ImpUse = getReMatImplicitUse(li, MI); 896 if (ImpUse) { 897 const LiveInterval &ImpLi = getInterval(ImpUse); 898 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg), 899 re = mri_->use_end(); ri != re; ++ri) { 900 MachineInstr *UseMI = &*ri; 901 unsigned UseIdx = getInstructionIndex(UseMI); 902 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo) 903 continue; 904 if (!isValNoAvailableAt(ImpLi, MI, UseIdx)) 905 return false; 906 } 907 } 908 return true; 909} 910 911/// isReMaterializable - Returns true if every definition of MI of every 912/// val# of the specified interval is re-materializable. 913bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) { 914 isLoad = false; 915 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end(); 916 i != e; ++i) { 917 const VNInfo *VNI = *i; 918 unsigned DefIdx = VNI->def; 919 if (DefIdx == ~1U) 920 continue; // Dead val#. 921 // Is the def for the val# rematerializable? 922 if (DefIdx == ~0u) 923 return false; 924 MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx); 925 bool DefIsLoad = false; 926 if (!ReMatDefMI || 927 !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad)) 928 return false; 929 isLoad |= DefIsLoad; 930 } 931 return true; 932} 933 934/// FilterFoldedOps - Filter out two-address use operands. Return 935/// true if it finds any issue with the operands that ought to prevent 936/// folding. 
937static bool FilterFoldedOps(MachineInstr *MI, 938 SmallVector<unsigned, 2> &Ops, 939 unsigned &MRInfo, 940 SmallVector<unsigned, 2> &FoldOps) { 941 const TargetInstrDesc &TID = MI->getDesc(); 942 943 MRInfo = 0; 944 for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 945 unsigned OpIdx = Ops[i]; 946 MachineOperand &MO = MI->getOperand(OpIdx); 947 // FIXME: fold subreg use. 948 if (MO.getSubReg()) 949 return true; 950 if (MO.isDef()) 951 MRInfo |= (unsigned)VirtRegMap::isMod; 952 else { 953 // Filter out two-address use operand(s). 954 if (!MO.isImplicit() && 955 TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) { 956 MRInfo = VirtRegMap::isModRef; 957 continue; 958 } 959 MRInfo |= (unsigned)VirtRegMap::isRef; 960 } 961 FoldOps.push_back(OpIdx); 962 } 963 return false; 964} 965 966 967/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from 968/// slot / to reg or any rematerialized load into ith operand of specified 969/// MI. If it is successul, MI is updated with the newly created MI and 970/// returns true. 971bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI, 972 VirtRegMap &vrm, MachineInstr *DefMI, 973 unsigned InstrIdx, 974 SmallVector<unsigned, 2> &Ops, 975 bool isSS, int Slot, unsigned Reg) { 976 // If it is an implicit def instruction, just delete it. 977 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) { 978 RemoveMachineInstrFromMaps(MI); 979 vrm.RemoveMachineInstrFromMaps(MI); 980 MI->eraseFromParent(); 981 ++numFolds; 982 return true; 983 } 984 985 // Filter the list of operand indexes that are to be folded. Abort if 986 // any operand will prevent folding. 987 unsigned MRInfo = 0; 988 SmallVector<unsigned, 2> FoldOps; 989 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps)) 990 return false; 991 992 // The only time it's safe to fold into a two address instruction is when 993 // it's folding reload and spill from / into a spill stack slot. 994 if (DefMI && (MRInfo & VirtRegMap::isMod)) 995 return false; 996 997 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot) 998 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI); 999 if (fmi) { 1000 // Remember this instruction uses the spill slot. 1001 if (isSS) vrm.addSpillSlotUse(Slot, fmi); 1002 1003 // Attempt to fold the memory reference into the instruction. If 1004 // we can do this, we don't need to insert spill code. 1005 MachineBasicBlock &MBB = *MI->getParent(); 1006 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot)) 1007 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo); 1008 vrm.transferSpillPts(MI, fmi); 1009 vrm.transferRestorePts(MI, fmi); 1010 vrm.transferEmergencySpills(MI, fmi); 1011 mi2iMap_.erase(MI); 1012 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi; 1013 mi2iMap_[fmi] = InstrIdx; 1014 MI = MBB.insert(MBB.erase(MI), fmi); 1015 ++numFolds; 1016 return true; 1017 } 1018 return false; 1019} 1020 1021/// canFoldMemoryOperand - Returns true if the specified load / store 1022/// folding is possible. 1023bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI, 1024 SmallVector<unsigned, 2> &Ops, 1025 bool ReMat) const { 1026 // Filter the list of operand indexes that are to be folded. Abort if 1027 // any operand will prevent folding. 1028 unsigned MRInfo = 0; 1029 SmallVector<unsigned, 2> FoldOps; 1030 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps)) 1031 return false; 1032 1033 // It's only legal to remat for a use, not a def. 
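  // Rematerialization re-computes the value at its use point; it cannot supply
  // the store that a modified (def) operand would need, so folding is rejected
  // below whenever the filtered operands include a def.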
1034 if (ReMat && (MRInfo & VirtRegMap::isMod)) 1035 return false; 1036 1037 return tii_->canFoldMemoryOperand(MI, FoldOps); 1038} 1039 1040bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const { 1041 SmallPtrSet<MachineBasicBlock*, 4> MBBs; 1042 for (LiveInterval::Ranges::const_iterator 1043 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) { 1044 std::vector<IdxMBBPair>::const_iterator II = 1045 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start); 1046 if (II == Idx2MBBMap.end()) 1047 continue; 1048 if (I->end > II->first) // crossing a MBB. 1049 return false; 1050 MBBs.insert(II->second); 1051 if (MBBs.size() > 1) 1052 return false; 1053 } 1054 return true; 1055} 1056 1057/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of 1058/// interval on to-be re-materialized operands of MI) with new register. 1059void LiveIntervals::rewriteImplicitOps(const LiveInterval &li, 1060 MachineInstr *MI, unsigned NewVReg, 1061 VirtRegMap &vrm) { 1062 // There is an implicit use. That means one of the other operand is 1063 // being remat'ed and the remat'ed instruction has li.reg as an 1064 // use operand. Make sure we rewrite that as well. 1065 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { 1066 MachineOperand &MO = MI->getOperand(i); 1067 if (!MO.isRegister()) 1068 continue; 1069 unsigned Reg = MO.getReg(); 1070 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg)) 1071 continue; 1072 if (!vrm.isReMaterialized(Reg)) 1073 continue; 1074 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg); 1075 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg); 1076 if (UseMO) 1077 UseMO->setReg(NewVReg); 1078 } 1079} 1080 1081/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions 1082/// for addIntervalsForSpills to rewrite uses / defs for the given live range. 1083bool LiveIntervals:: 1084rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI, 1085 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI, 1086 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI, 1087 unsigned Slot, int LdSlot, 1088 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete, 1089 VirtRegMap &vrm, 1090 const TargetRegisterClass* rc, 1091 SmallVector<int, 4> &ReMatIds, 1092 const MachineLoopInfo *loopInfo, 1093 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse, 1094 DenseMap<unsigned,unsigned> &MBBVRegsMap, 1095 std::vector<LiveInterval*> &NewLIs, float &SSWeight) { 1096 MachineBasicBlock *MBB = MI->getParent(); 1097 unsigned loopDepth = loopInfo->getLoopDepth(MBB); 1098 bool CanFold = false; 1099 RestartInstruction: 1100 for (unsigned i = 0; i != MI->getNumOperands(); ++i) { 1101 MachineOperand& mop = MI->getOperand(i); 1102 if (!mop.isRegister()) 1103 continue; 1104 unsigned Reg = mop.getReg(); 1105 unsigned RegI = Reg; 1106 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg)) 1107 continue; 1108 if (Reg != li.reg) 1109 continue; 1110 1111 bool TryFold = !DefIsReMat; 1112 bool FoldSS = true; // Default behavior unless it's a remat. 1113 int FoldSlot = Slot; 1114 if (DefIsReMat) { 1115 // If this is the rematerializable definition MI itself and 1116 // all of its uses are rematerialized, simply delete it. 
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
        DOUT << MI << '\n';
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isRegister())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (HasUse && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons, e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG into target registers that would overlap.
      HasUse = false;

    // Update stack slot spill weight if we are splitting.
    float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
    if (!TrySplit)
      SSWeight += Weight;

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, Reg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isRemoved(MI)) {
            SSWeight -= Weight;
            break;
          }
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    // Create a new virtual register for the spill interval.
1209 bool CreatedNewVReg = false; 1210 if (NewVReg == 0) { 1211 NewVReg = mri_->createVirtualRegister(rc); 1212 vrm.grow(); 1213 CreatedNewVReg = true; 1214 } 1215 mop.setReg(NewVReg); 1216 if (mop.isImplicit()) 1217 rewriteImplicitOps(li, MI, NewVReg, vrm); 1218 1219 // Reuse NewVReg for other reads. 1220 for (unsigned j = 0, e = Ops.size(); j != e; ++j) { 1221 MachineOperand &mopj = MI->getOperand(Ops[j]); 1222 mopj.setReg(NewVReg); 1223 if (mopj.isImplicit()) 1224 rewriteImplicitOps(li, MI, NewVReg, vrm); 1225 } 1226 1227 if (CreatedNewVReg) { 1228 if (DefIsReMat) { 1229 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/); 1230 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) { 1231 // Each valnum may have its own remat id. 1232 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg); 1233 } else { 1234 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]); 1235 } 1236 if (!CanDelete || (HasUse && HasDef)) { 1237 // If this is a two-addr instruction then its use operands are 1238 // rematerializable but its def is not. It should be assigned a 1239 // stack slot. 1240 vrm.assignVirt2StackSlot(NewVReg, Slot); 1241 } 1242 } else { 1243 vrm.assignVirt2StackSlot(NewVReg, Slot); 1244 } 1245 } else if (HasUse && HasDef && 1246 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) { 1247 // If this interval hasn't been assigned a stack slot (because earlier 1248 // def is a deleted remat def), do it now. 1249 assert(Slot != VirtRegMap::NO_STACK_SLOT); 1250 vrm.assignVirt2StackSlot(NewVReg, Slot); 1251 } 1252 1253 // Re-matting an instruction with virtual register use. Add the 1254 // register as an implicit use on the use MI. 1255 if (DefIsReMat && ImpUse) 1256 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true)); 1257 1258 // create a new register interval for this spill / remat. 1259 LiveInterval &nI = getOrCreateInterval(NewVReg); 1260 if (CreatedNewVReg) { 1261 NewLIs.push_back(&nI); 1262 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg)); 1263 if (TrySplit) 1264 vrm.setIsSplitFromReg(NewVReg, li.reg); 1265 } 1266 1267 if (HasUse) { 1268 if (CreatedNewVReg) { 1269 LiveRange LR(getLoadIndex(index), getUseIndex(index)+1, 1270 nI.getNextValue(~0U, 0, VNInfoAllocator)); 1271 DOUT << " +" << LR; 1272 nI.addRange(LR); 1273 } else { 1274 // Extend the split live interval to this def / use. 1275 unsigned End = getUseIndex(index)+1; 1276 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End, 1277 nI.getValNumInfo(nI.getNumValNums()-1)); 1278 DOUT << " +" << LR; 1279 nI.addRange(LR); 1280 } 1281 } 1282 if (HasDef) { 1283 LiveRange LR(getDefIndex(index), getStoreIndex(index), 1284 nI.getNextValue(~0U, 0, VNInfoAllocator)); 1285 DOUT << " +" << LR; 1286 nI.addRange(LR); 1287 } 1288 1289 DOUT << "\t\t\t\tAdded new interval: "; 1290 nI.print(DOUT, tri_); 1291 DOUT << '\n'; 1292 } 1293 return CanFold; 1294} 1295bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li, 1296 const VNInfo *VNI, 1297 MachineBasicBlock *MBB, unsigned Idx) const { 1298 unsigned End = getMBBEndIdx(MBB); 1299 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) { 1300 unsigned KillIdx = VNI->kills[j]; 1301 if (KillIdx > Idx && KillIdx < End) 1302 return true; 1303 } 1304 return false; 1305} 1306 1307/// RewriteInfo - Keep track of machine instrs that will be rewritten 1308/// during spilling. 
namespace {
  struct RewriteInfo {
    unsigned Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the defs / uses in this live range that will be
  // rewritten. Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
       re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    if (O.isUse() && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons, e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG into target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
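    // For example (illustration only): an instruction such as
    //   %reg1026 = ADD %reg1026, %reg1026
    // contributes one def entry and two use entries at the same index; the
    // loop below merges them into a single per-instruction summary.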
1385 unsigned NumUses = MIHasUse; 1386 while (i != e && RewriteMIs[i].MI == MI) { 1387 assert(RewriteMIs[i].Index == index); 1388 bool isUse = RewriteMIs[i].HasUse; 1389 if (isUse) ++NumUses; 1390 MIHasUse |= isUse; 1391 MIHasDef |= RewriteMIs[i].HasDef; 1392 ++i; 1393 } 1394 MachineBasicBlock *MBB = MI->getParent(); 1395 1396 if (ImpUse && MI != ReMatDefMI) { 1397 // Re-matting an instruction with virtual register use. Update the 1398 // register interval's spill weight to HUGE_VALF to prevent it from 1399 // being spilled. 1400 LiveInterval &ImpLi = getInterval(ImpUse); 1401 ImpLi.weight = HUGE_VALF; 1402 } 1403 1404 unsigned MBBId = MBB->getNumber(); 1405 unsigned ThisVReg = 0; 1406 if (TrySplit) { 1407 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId); 1408 if (NVI != MBBVRegsMap.end()) { 1409 ThisVReg = NVI->second; 1410 // One common case: 1411 // x = use 1412 // ... 1413 // ... 1414 // def = ... 1415 // = use 1416 // It's better to start a new interval to avoid artifically 1417 // extend the new interval. 1418 if (MIHasDef && !MIHasUse) { 1419 MBBVRegsMap.erase(MBB->getNumber()); 1420 ThisVReg = 0; 1421 } 1422 } 1423 } 1424 1425 bool IsNew = ThisVReg == 0; 1426 if (IsNew) { 1427 // This ends the previous live interval. If all of its def / use 1428 // can be folded, give it a low spill weight. 1429 if (NewVReg && TrySplit && AllCanFold) { 1430 LiveInterval &nI = getOrCreateInterval(NewVReg); 1431 nI.weight /= 10.0F; 1432 } 1433 AllCanFold = true; 1434 } 1435 NewVReg = ThisVReg; 1436 1437 bool HasDef = false; 1438 bool HasUse = false; 1439 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit, 1440 index, end, MI, ReMatOrigDefMI, ReMatDefMI, 1441 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat, 1442 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg, 1443 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight); 1444 if (!HasDef && !HasUse) 1445 continue; 1446 1447 AllCanFold &= CanFold; 1448 1449 // Update weight of spill interval. 1450 LiveInterval &nI = getOrCreateInterval(NewVReg); 1451 if (!TrySplit) { 1452 // The spill weight is now infinity as it cannot be spilled again. 1453 nI.weight = HUGE_VALF; 1454 continue; 1455 } 1456 1457 // Keep track of the last def and first use in each MBB. 1458 if (HasDef) { 1459 if (MI != ReMatOrigDefMI || !CanDelete) { 1460 bool HasKill = false; 1461 if (!HasUse) 1462 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index)); 1463 else { 1464 // If this is a two-address code, then this index starts a new VNInfo. 1465 const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index)); 1466 if (VNI) 1467 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index)); 1468 } 1469 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII = 1470 SpillIdxes.find(MBBId); 1471 if (!HasKill) { 1472 if (SII == SpillIdxes.end()) { 1473 std::vector<SRInfo> S; 1474 S.push_back(SRInfo(index, NewVReg, true)); 1475 SpillIdxes.insert(std::make_pair(MBBId, S)); 1476 } else if (SII->second.back().vreg != NewVReg) { 1477 SII->second.push_back(SRInfo(index, NewVReg, true)); 1478 } else if ((int)index > SII->second.back().index) { 1479 // If there is an earlier def and this is a two-address 1480 // instruction, then it's not possible to fold the store (which 1481 // would also fold the load). 
1482 SRInfo &Info = SII->second.back(); 1483 Info.index = index; 1484 Info.canFold = !HasUse; 1485 } 1486 SpillMBBs.set(MBBId); 1487 } else if (SII != SpillIdxes.end() && 1488 SII->second.back().vreg == NewVReg && 1489 (int)index > SII->second.back().index) { 1490 // There is an earlier def that's not killed (must be two-address). 1491 // The spill is no longer needed. 1492 SII->second.pop_back(); 1493 if (SII->second.empty()) { 1494 SpillIdxes.erase(MBBId); 1495 SpillMBBs.reset(MBBId); 1496 } 1497 } 1498 } 1499 } 1500 1501 if (HasUse) { 1502 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII = 1503 SpillIdxes.find(MBBId); 1504 if (SII != SpillIdxes.end() && 1505 SII->second.back().vreg == NewVReg && 1506 (int)index > SII->second.back().index) 1507 // Use(s) following the last def, it's not safe to fold the spill. 1508 SII->second.back().canFold = false; 1509 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII = 1510 RestoreIdxes.find(MBBId); 1511 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg) 1512 // If we are splitting live intervals, only fold if it's the first 1513 // use and there isn't another use later in the MBB. 1514 RII->second.back().canFold = false; 1515 else if (IsNew) { 1516 // Only need a reload if there isn't an earlier def / use. 1517 if (RII == RestoreIdxes.end()) { 1518 std::vector<SRInfo> Infos; 1519 Infos.push_back(SRInfo(index, NewVReg, true)); 1520 RestoreIdxes.insert(std::make_pair(MBBId, Infos)); 1521 } else { 1522 RII->second.push_back(SRInfo(index, NewVReg, true)); 1523 } 1524 RestoreMBBs.set(MBBId); 1525 } 1526 } 1527 1528 // Update spill weight. 1529 unsigned loopDepth = loopInfo->getLoopDepth(MBB); 1530 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth); 1531 } 1532 1533 if (NewVReg && TrySplit && AllCanFold) { 1534 // If all of its def / use can be folded, give it a low spill weight. 1535 LiveInterval &nI = getOrCreateInterval(NewVReg); 1536 nI.weight /= 10.0F; 1537 } 1538} 1539 1540bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr, 1541 BitVector &RestoreMBBs, 1542 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) { 1543 if (!RestoreMBBs[Id]) 1544 return false; 1545 std::vector<SRInfo> &Restores = RestoreIdxes[Id]; 1546 for (unsigned i = 0, e = Restores.size(); i != e; ++i) 1547 if (Restores[i].index == index && 1548 Restores[i].vreg == vr && 1549 Restores[i].canFold) 1550 return true; 1551 return false; 1552} 1553 1554void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr, 1555 BitVector &RestoreMBBs, 1556 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) { 1557 if (!RestoreMBBs[Id]) 1558 return; 1559 std::vector<SRInfo> &Restores = RestoreIdxes[Id]; 1560 for (unsigned i = 0, e = Restores.size(); i != e; ++i) 1561 if (Restores[i].index == index && Restores[i].vreg) 1562 Restores[i].index = -1; 1563} 1564 1565/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being 1566/// spilled and create empty intervals for their uses. 
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
       re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg)
          MO.setReg(NewVReg);
      }
    }
  }
}

namespace {
  struct LISorter {
    bool operator()(LiveInterval* A, LiveInterval* B) {
      return A->beginNumber() < B->beginNumber();
    }
  };
}

std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm, float& SSWeight) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  // Since this is called after the analysis is done, we don't know if
  // LiveVariables is available.
  lv_ = getAnalysisToUpdate<LiveVariables>();

  std::vector<LiveInterval*> added;

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  DEBUG(li.dump());
  DOUT << '\n';

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  DenseMap<MachineInstr*, unsigned> VRegMap;
  DenseMap<MachineInstr*, VNInfo*> VNMap;

  for (MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg),
       RE = mri_->reg_end(); RI != RE; ) {
    // Create a new virtual register for the spill interval.
    MachineOperand& MO = RI.getOperand();
    unsigned NewVReg = 0;
    if (!VRegMap.count(MO.getParent()))
      VRegMap[MO.getParent()] = NewVReg = mri_->createVirtualRegister(rc);
    else
      NewVReg = VRegMap[MO.getParent()];

    // Increment iterator to avoid invalidation.
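    // (MO.setReg() below moves this operand onto NewVReg's use/def list in
    // MachineRegisterInfo, which would invalidate a reg_iterator still
    // pointing into li.reg's list, hence RI is advanced first.)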
1642 ++RI;
1643
1644 MO.setReg(NewVReg);
1645
1646 // Register the new virtual register with the map and give it the same slot.
1647 vrm.grow();
1648 vrm.assignVirt2StackSlot(NewVReg, slot);
1649 LiveInterval &nI = getOrCreateInterval(NewVReg);
1650 assert(nI.empty());
1651
1652 // The spill weight is now infinity as it
1653 // cannot be spilled again.
1654 nI.weight = HUGE_VALF;
1655
1656 unsigned index = getInstructionIndex(MO.getParent());
1657 bool HasUse = MO.isUse();
1658 bool HasDef = MO.isDef();
1659 if (!VNMap.count(MO.getParent()))
1660 VNMap[MO.getParent()] = nI.getNextValue(~0U, 0, getVNInfoAllocator());
1661 if (HasUse) {
1662 LiveRange LR(getLoadIndex(index), getUseIndex(index),
1663 VNMap[MO.getParent()]);
1664 DOUT << " +" << LR;
1665 nI.addRange(LR);
1666 }
1667 if (HasDef) {
1668 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1669 VNMap[MO.getParent()]);
1670 DOUT << " +" << LR;
1671 nI.addRange(LR);
1672 }
1673
1674 added.push_back(&nI);
1675
1676 // Update live variables if the analysis is available.
1677 if (lv_)
1678 lv_->addVirtualRegisterKilled(NewVReg, MO.getParent());
1679
1680 DOUT << "\t\t\t\tadded new interval: ";
1681 DEBUG(nI.dump());
1682 DOUT << '\n';
1683 }
1684
1685 SSWeight = HUGE_VALF;
1686
1687 std::sort(added.begin(), added.end(), LISorter());
1688
1689 return added;
1690 }
1691
/// addIntervalsForSpills - Spill the given interval. Decide for each val#
/// whether it can be rematerialized or needs the stack slot, rewrite all defs
/// and uses onto new intervals, and record the spill / restore points the
/// spiller must insert.
1692 std::vector<LiveInterval*> LiveIntervals::
1693 addIntervalsForSpills(const LiveInterval &li,
1694 const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
1695 float &SSWeight) {
1696 assert(li.weight != HUGE_VALF &&
1697 "attempt to spill already spilled interval!");
1698
1699 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1700 li.print(DOUT, tri_);
1701 DOUT << '\n';
1702
1703 // Spill slot weight.
1704 SSWeight = 0.0f;
1705
1706 // Each bit specifies whether a spill is required in the corresponding MBB.
1707 BitVector SpillMBBs(mf_->getNumBlockIDs());
1708 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1709 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1710 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1711 DenseMap<unsigned,unsigned> MBBVRegsMap;
1712 std::vector<LiveInterval*> NewLIs;
1713 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1714
1715 unsigned NumValNums = li.getNumValNums();
1716 SmallVector<MachineInstr*, 4> ReMatDefs;
1717 ReMatDefs.resize(NumValNums, NULL);
1718 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1719 ReMatOrigDefs.resize(NumValNums, NULL);
1720 SmallVector<int, 4> ReMatIds;
1721 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1722 BitVector ReMatDelete(NumValNums);
1723 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1724
1725 // Spilling a split live interval. It cannot be split any further. It is
1726 // also guaranteed to be a single val# / range interval.
1727 if (vrm.getPreSplitReg(li.reg)) {
1728 vrm.setIsSplitFromReg(li.reg, 0);
1729 // Unset the split kill marker on the last use.
1730 unsigned KillIdx = vrm.getKillPoint(li.reg);
1731 if (KillIdx) {
1732 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1733 assert(KillMI && "Last use disappeared?");
1734 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1735 assert(KillOp != -1 && "Last use disappeared?");
1736 KillMI->getOperand(KillOp).setIsKill(false);
1737 }
1738 vrm.removeKillPoint(li.reg);
1739 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1740 Slot = vrm.getStackSlot(li.reg);
1741 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1742 MachineInstr *ReMatDefMI = DefIsReMat ?
1743 vrm.getReMaterializedMI(li.reg) : NULL;
1744 int LdSlot = 0;
1745 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1746 bool isLoad = isLoadSS ||
1747 (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1748 bool IsFirstRange = true;
1749 for (LiveInterval::Ranges::const_iterator
1750 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1751 // If this is a split live interval with multiple ranges, it means there
1752 // are two-address instructions that re-defined the value. Only the
1753 // first def can be rematerialized!
1754 if (IsFirstRange) {
1755 // Note ReMatOrigDefMI has already been deleted.
1756 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1757 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1758 false, vrm, rc, ReMatIds, loopInfo,
1759 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1760 MBBVRegsMap, NewLIs, SSWeight);
1761 } else {
1762 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1763 Slot, 0, false, false, false,
1764 false, vrm, rc, ReMatIds, loopInfo,
1765 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1766 MBBVRegsMap, NewLIs, SSWeight);
1767 }
1768 IsFirstRange = false;
1769 }
1770
1771 SSWeight = 0.0f; // Already accounted for when split.
1772 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1773 return NewLIs;
1774 }
1775
1776 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1777 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1778 TrySplit = false;
1779 if (TrySplit)
1780 ++numSplits;
1781 bool NeedStackSlot = false;
1782 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1783 i != e; ++i) {
1784 const VNInfo *VNI = *i;
1785 unsigned VN = VNI->id;
1786 unsigned DefIdx = VNI->def;
1787 if (DefIdx == ~1U)
1788 continue; // Dead val#.
1789 // Is the def for the val# rematerializable?
1790 MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1791 ? 0 : getInstructionFromIndex(DefIdx);
1792 bool dummy;
1793 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
1794 // Remember how to remat the def of this val#.
1795 ReMatOrigDefs[VN] = ReMatDefMI;
1796 // Original def may be modified so we have to make a copy here.
1797 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1798 ClonedMIs.push_back(Clone);
1799 ReMatDefs[VN] = Clone;
1800
1801 bool CanDelete = true;
1802 if (VNI->hasPHIKill) {
1803 // A kill is a phi node; not all of its uses can be rematerialized.
1804 // It must not be deleted.
1805 CanDelete = false;
1806 // Need a stack slot if there is any live range where uses cannot be
1807 // rematerialized.
1808 NeedStackSlot = true;
1809 }
1810 if (CanDelete)
1811 ReMatDelete.set(VN);
1812 } else {
1813 // Need a stack slot if there is any live range where uses cannot be
1814 // rematerialized.
1815 NeedStackSlot = true;
1816 }
1817 }
1818
1819 // One stack slot per live interval.
1820 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1821 Slot = vrm.assignVirt2StackSlot(li.reg);
1822
1823 // Create new intervals and rewrite defs and uses.
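// Each range is handled according to the per-val# decisions made above:
// rematerialize from the cloned def where possible, otherwise spill to and
// restore from Slot.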
1824 for (LiveInterval::Ranges::const_iterator
1825 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1826 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1827 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1828 bool DefIsReMat = ReMatDefMI != NULL;
1829 bool CanDelete = ReMatDelete[I->valno->id];
1830 int LdSlot = 0;
1831 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1832 bool isLoad = isLoadSS ||
1833 (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1834 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1835 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1836 CanDelete, vrm, rc, ReMatIds, loopInfo,
1837 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1838 MBBVRegsMap, NewLIs, SSWeight);
1839 }
1840
1841 // Insert spills / restores if we are splitting.
1842 if (!TrySplit) {
1843 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1844 return NewLIs;
1845 }
1846
1847 SmallPtrSet<LiveInterval*, 4> AddedKill;
1848 SmallVector<unsigned, 2> Ops;
1849 if (NeedStackSlot) {
1850 int Id = SpillMBBs.find_first();
1851 while (Id != -1) {
1852 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1853 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1854 std::vector<SRInfo> &spills = SpillIdxes[Id];
1855 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1856 int index = spills[i].index;
1857 unsigned VReg = spills[i].vreg;
1858 LiveInterval &nI = getOrCreateInterval(VReg);
1859 bool isReMat = vrm.isReMaterialized(VReg);
1860 MachineInstr *MI = getInstructionFromIndex(index);
1861 bool CanFold = false;
1862 bool FoundUse = false;
1863 Ops.clear();
1864 if (spills[i].canFold) {
1865 CanFold = true;
1866 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1867 MachineOperand &MO = MI->getOperand(j);
1868 if (!MO.isRegister() || MO.getReg() != VReg)
1869 continue;
1870
1871 Ops.push_back(j);
1872 if (MO.isDef())
1873 continue;
1874 if (isReMat ||
1875 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1876 RestoreMBBs, RestoreIdxes))) {
1877 // MI has two-address uses of the same register. If the use
1878 // isn't the first and only use in the BB, then we can't fold
1879 // it. FIXME: Move this to rewriteInstructionsForSpills.
1880 CanFold = false;
1881 break;
1882 }
1883 FoundUse = true;
1884 }
1885 }
1886 // Fold the store into the def if possible.
1887 bool Folded = false;
1888 if (CanFold && !Ops.empty()) {
1889 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1890 Folded = true;
1891 if (FoundUse > 0) {
1892 // Also folded uses, do not issue a load.
1893 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1894 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1895 }
1896 nI.removeRange(getDefIndex(index), getStoreIndex(index));
1897 }
1898 }
1899
1900 // Otherwise tell the spiller to issue a spill.
1901 if (!Folded) {
1902 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1903 bool isKill = LR->end == getStoreIndex(index);
1904 if (!MI->registerDefIsDead(nI.reg))
1905 // No need to spill a dead def.
1906 vrm.addSpillPoint(VReg, isKill, MI);
1907 if (isKill)
1908 AddedKill.insert(&nI);
1909 }
1910
1911 // Update spill slot weight.
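// (Stores of rematerialized values are not counted toward the slot's weight.)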
1912 if (!isReMat)
1913 SSWeight += getSpillWeight(true, false, loopDepth);
1914 }
1915 Id = SpillMBBs.find_next(Id);
1916 }
1917 }
1918
1919 int Id = RestoreMBBs.find_first();
1920 while (Id != -1) {
1921 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1922 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1923
1924 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1925 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1926 int index = restores[i].index;
1927 if (index == -1)
1928 continue;
1929 unsigned VReg = restores[i].vreg;
1930 LiveInterval &nI = getOrCreateInterval(VReg);
1931 bool isReMat = vrm.isReMaterialized(VReg);
1932 MachineInstr *MI = getInstructionFromIndex(index);
1933 bool CanFold = false;
1934 Ops.clear();
1935 if (restores[i].canFold) {
1936 CanFold = true;
1937 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1938 MachineOperand &MO = MI->getOperand(j);
1939 if (!MO.isRegister() || MO.getReg() != VReg)
1940 continue;
1941
1942 if (MO.isDef()) {
1943 // If this restore were to be folded, it would have been folded
1944 // already.
1945 CanFold = false;
1946 break;
1947 }
1948 Ops.push_back(j);
1949 }
1950 }
1951
1952 // Fold the load into the use if possible.
1953 bool Folded = false;
1954 if (CanFold && !Ops.empty()) {
1955 if (!isReMat)
1956 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1957 else {
1958 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1959 int LdSlot = 0;
1960 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1961 // If the rematerializable def is a load, also try to fold it.
1962 if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
1963 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1964 Ops, isLoadSS, LdSlot, VReg);
1965 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1966 if (ImpUse) {
1967 // Re-matting an instruction with virtual register use. Add the
1968 // register as an implicit use on the use MI and update the register
1969 // interval's spill weight to HUGE_VALF to prevent it from being
1970 // spilled.
1971 LiveInterval &ImpLi = getInterval(ImpUse);
1972 ImpLi.weight = HUGE_VALF;
1973 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1974 }
1975 }
1976 }
1977 // If folding is not possible / failed, then tell the spiller to issue a
1978 // load / rematerialization for us.
1979 if (Folded)
1980 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1981 else
1982 vrm.addRestorePoint(VReg, MI);
1983
1984 // Update spill slot weight.
1985 if (!isReMat)
1986 SSWeight += getSpillWeight(false, true, loopDepth);
1987 }
1988 Id = RestoreMBBs.find_next(Id);
1989 }
1990
1991 // Finalize intervals: add kills, finalize spill weights, and filter out
1992 // dead intervals.
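// Each surviving interval's weight is normalized by its approximate
// instruction count, and its last use is marked as a kill unless the spill
// handling above already noted that the interval ends at a spill point.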
1993 std::vector<LiveInterval*> RetNewLIs;
1994 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1995 LiveInterval *LI = NewLIs[i];
1996 if (!LI->empty()) {
1997 LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
1998 if (!AddedKill.count(LI)) {
1999 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2000 unsigned LastUseIdx = getBaseIndex(LR->end);
2001 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2002 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2003 assert(UseIdx != -1);
2004 if (LastUse->getOperand(UseIdx).isImplicit() ||
2005 LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
2006 LastUse->getOperand(UseIdx).setIsKill();
2007 vrm.addKillPoint(LI->reg, LastUseIdx);
2008 }
2009 }
2010 RetNewLIs.push_back(LI);
2011 }
2012 }
2013
2014 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2015 return RetNewLIs;
2016 }
2017
2018 /// hasAllocatableSuperReg - Return true if the specified physical register has
2019 /// any super register that's allocatable.
2020 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2021 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2022 if (allocatableRegs_[*AS] && hasInterval(*AS))
2023 return true;
2024 return false;
2025 }
2026
2027 /// getRepresentativeReg - Find the largest super register of the specified
2028 /// physical register.
2029 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2030 // Find the largest super-register that is allocatable.
2031 unsigned BestReg = Reg;
2032 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2033 unsigned SuperReg = *AS;
2034 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2035 BestReg = SuperReg;
2036 break;
2037 }
2038 }
2039 return BestReg;
2040 }
2041
2042 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2043 /// specified interval that conflict with the specified physical register.
2044 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2045 unsigned PhysReg) const {
2046 unsigned NumConflicts = 0;
2047 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2048 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2049 E = mri_->reg_end(); I != E; ++I) {
2050 MachineOperand &O = I.getOperand();
2051 MachineInstr *MI = O.getParent();
2052 unsigned Index = getInstructionIndex(MI);
2053 if (pli.liveAt(Index))
2054 ++NumConflicts;
2055 }
2056 return NumConflicts;
2057 }
2058
2059 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2060 /// around all defs and uses of the specified interval.
2061 void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2062 unsigned PhysReg, VirtRegMap &vrm) {
2063 unsigned SpillReg = getRepresentativeReg(PhysReg);
2064
2065 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2066 // If a register aliases PhysReg but is not a sub-register of the chosen
2067 // representative super register, assert, since we can't handle that case
2068 // yet.
2069 assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
2070 tri_->isSuperRegister(*AS, SpillReg));
2071
2072 LiveInterval &pli = getInterval(SpillReg);
2073 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2074 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2075 E = mri_->reg_end(); I != E; ++I) {
2076 MachineOperand &O = I.getOperand();
2077 MachineInstr *MI = O.getParent();
2078 if (SeenMIs.count(MI))
2079 continue;
2080 SeenMIs.insert(MI);
2081 unsigned Index = getInstructionIndex(MI);
2082 if (pli.liveAt(Index)) {
2083 vrm.addEmergencySpill(SpillReg, MI);
2084 pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2085 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2086 if (!hasInterval(*AS))
2087 continue;
2088 LiveInterval &spli = getInterval(*AS);
2089 if (spli.liveAt(Index))
2090 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2091 }
2092 }
2093 }
2094 }
2095
/// addLiveRangeToEndOfBlock - Given a register and an instruction defining it,
/// extend (or create) its live interval from that def to the end of the
/// defining block, marking the value as killed by a PHI, and return the new
/// range.
2096 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2097 MachineInstr* startInst) {
2098 LiveInterval& Interval = getOrCreateInterval(reg);
2099 VNInfo* VN = Interval.getNextValue(
2100 getInstructionIndex(startInst) + InstrSlots::DEF,
2101 startInst, getVNInfoAllocator());
2102 VN->hasPHIKill = true;
2103 VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
2104 LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2105 getMBBEndIdx(startInst->getParent()) + 1, VN);
2106 Interval.addRange(LR);
2107
2108 return LR;
2109 }
2110