LiveIntervalAnalysis.cpp revision c60e6020c0dd1260b0d60835e2ab823f97a4b810
1//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file was developed by the LLVM research group and is distributed under 6// the University of Illinois Open Source License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file implements the LiveInterval analysis pass which is used 11// by the Linear Scan Register allocator. This pass linearizes the 12// basic blocks of the function in DFS order and uses the 13// LiveVariables pass to conservatively compute live intervals for 14// each virtual and physical register. 15// 16//===----------------------------------------------------------------------===// 17 18#define DEBUG_TYPE "liveintervals" 19#include "llvm/CodeGen/LiveIntervalAnalysis.h" 20#include "VirtRegMap.h" 21#include "llvm/Value.h" 22#include "llvm/Analysis/LoopInfo.h" 23#include "llvm/CodeGen/LiveVariables.h" 24#include "llvm/CodeGen/MachineFrameInfo.h" 25#include "llvm/CodeGen/MachineInstr.h" 26#include "llvm/CodeGen/Passes.h" 27#include "llvm/CodeGen/SSARegMap.h" 28#include "llvm/Target/MRegisterInfo.h" 29#include "llvm/Target/TargetInstrInfo.h" 30#include "llvm/Target/TargetMachine.h" 31#include "llvm/Support/CommandLine.h" 32#include "llvm/Support/Debug.h" 33#include "llvm/ADT/Statistic.h" 34#include "llvm/ADT/STLExtras.h" 35#include <algorithm> 36#include <cmath> 37using namespace llvm; 38 39namespace { 40 RegisterAnalysis<LiveIntervals> X("liveintervals", "Live Interval Analysis"); 41 42 Statistic<> numIntervals 43 ("liveintervals", "Number of original intervals"); 44 45 Statistic<> numIntervalsAfter 46 ("liveintervals", "Number of intervals after coalescing"); 47 48 Statistic<> numJoins 49 ("liveintervals", "Number of interval joins performed"); 50 51 Statistic<> numPeep 52 ("liveintervals", "Number of identity moves eliminated after coalescing"); 53 54 Statistic<> numFolded 55 
("liveintervals", "Number of loads/stores folded into instructions"); 56 57 cl::opt<bool> 58 EnableJoining("join-liveintervals", 59 cl::desc("Join compatible live intervals"), 60 cl::init(true)); 61}; 62 63void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const 64{ 65 AU.addRequired<LiveVariables>(); 66 AU.addPreservedID(PHIEliminationID); 67 AU.addRequiredID(PHIEliminationID); 68 AU.addRequiredID(TwoAddressInstructionPassID); 69 AU.addRequired<LoopInfo>(); 70 MachineFunctionPass::getAnalysisUsage(AU); 71} 72 73void LiveIntervals::releaseMemory() 74{ 75 mi2iMap_.clear(); 76 i2miMap_.clear(); 77 r2iMap_.clear(); 78 r2rMap_.clear(); 79} 80 81 82/// runOnMachineFunction - Register allocate the whole function 83/// 84bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) { 85 mf_ = &fn; 86 tm_ = &fn.getTarget(); 87 mri_ = tm_->getRegisterInfo(); 88 tii_ = tm_->getInstrInfo(); 89 lv_ = &getAnalysis<LiveVariables>(); 90 allocatableRegs_ = mri_->getAllocatableSet(fn); 91 r2rMap_.grow(mf_->getSSARegMap()->getLastVirtReg()); 92 93 // If this function has any live ins, insert a dummy instruction at the 94 // beginning of the function that we will pretend "defines" the values. This 95 // is to make the interval analysis simpler by providing a number. 96 if (fn.livein_begin() != fn.livein_end()) { 97 unsigned FirstLiveIn = fn.livein_begin()->first; 98 99 // Find a reg class that contains this live in. 
100 const TargetRegisterClass *RC = 0; 101 for (MRegisterInfo::regclass_iterator RCI = mri_->regclass_begin(), 102 E = mri_->regclass_end(); RCI != E; ++RCI) 103 if ((*RCI)->contains(FirstLiveIn)) { 104 RC = *RCI; 105 break; 106 } 107 108 MachineInstr *OldFirstMI = fn.begin()->begin(); 109 mri_->copyRegToReg(*fn.begin(), fn.begin()->begin(), 110 FirstLiveIn, FirstLiveIn, RC); 111 assert(OldFirstMI != fn.begin()->begin() && 112 "copyRetToReg didn't insert anything!"); 113 } 114 115 // number MachineInstrs 116 unsigned miIndex = 0; 117 for (MachineFunction::iterator mbb = mf_->begin(), mbbEnd = mf_->end(); 118 mbb != mbbEnd; ++mbb) 119 for (MachineBasicBlock::iterator mi = mbb->begin(), miEnd = mbb->end(); 120 mi != miEnd; ++mi) { 121 bool inserted = mi2iMap_.insert(std::make_pair(mi, miIndex)).second; 122 assert(inserted && "multiple MachineInstr -> index mappings"); 123 i2miMap_.push_back(mi); 124 miIndex += InstrSlots::NUM; 125 } 126 127 // Note intervals due to live-in values. 128 if (fn.livein_begin() != fn.livein_end()) { 129 MachineBasicBlock *Entry = fn.begin(); 130 for (MachineFunction::livein_iterator I = fn.livein_begin(), 131 E = fn.livein_end(); I != E; ++I) { 132 handlePhysicalRegisterDef(Entry, Entry->begin(), 133 getOrCreateInterval(I->first), 0, 0, true); 134 for (const unsigned* AS = mri_->getAliasSet(I->first); *AS; ++AS) 135 handlePhysicalRegisterDef(Entry, Entry->begin(), 136 getOrCreateInterval(*AS), 0, 0, true); 137 } 138 } 139 140 computeIntervals(); 141 142 numIntervals += getNumIntervals(); 143 144 DEBUG(std::cerr << "********** INTERVALS **********\n"; 145 for (iterator I = begin(), E = end(); I != E; ++I) { 146 I->second.print(std::cerr, mri_); 147 std::cerr << "\n"; 148 }); 149 150 // join intervals if requested 151 if (EnableJoining) joinIntervals(); 152 153 numIntervalsAfter += getNumIntervals(); 154 155 // perform a final pass over the instructions and compute spill 156 // weights, coalesce virtual registers and remove identity moves 
157 const LoopInfo& loopInfo = getAnalysis<LoopInfo>(); 158 159 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end(); 160 mbbi != mbbe; ++mbbi) { 161 MachineBasicBlock* mbb = mbbi; 162 unsigned loopDepth = loopInfo.getLoopDepth(mbb->getBasicBlock()); 163 164 for (MachineBasicBlock::iterator mii = mbb->begin(), mie = mbb->end(); 165 mii != mie; ) { 166 // if the move will be an identity move delete it 167 unsigned srcReg, dstReg, RegRep; 168 if (tii_->isMoveInstr(*mii, srcReg, dstReg) && 169 (RegRep = rep(srcReg)) == rep(dstReg)) { 170 // remove from def list 171 LiveInterval &interval = getOrCreateInterval(RegRep); 172 // remove index -> MachineInstr and 173 // MachineInstr -> index mappings 174 Mi2IndexMap::iterator mi2i = mi2iMap_.find(mii); 175 if (mi2i != mi2iMap_.end()) { 176 i2miMap_[mi2i->second/InstrSlots::NUM] = 0; 177 mi2iMap_.erase(mi2i); 178 } 179 mii = mbbi->erase(mii); 180 ++numPeep; 181 } 182 else { 183 for (unsigned i = 0; i < mii->getNumOperands(); ++i) { 184 const MachineOperand& mop = mii->getOperand(i); 185 if (mop.isRegister() && mop.getReg() && 186 MRegisterInfo::isVirtualRegister(mop.getReg())) { 187 // replace register with representative register 188 unsigned reg = rep(mop.getReg()); 189 mii->SetMachineOperandReg(i, reg); 190 191 LiveInterval &RegInt = getInterval(reg); 192 RegInt.weight += 193 (mop.isUse() + mop.isDef()) * pow(10.0F, (int)loopDepth); 194 } 195 } 196 ++mii; 197 } 198 } 199 } 200 201 DEBUG(dump()); 202 return true; 203} 204 205/// print - Implement the dump method. 
206void LiveIntervals::print(std::ostream &O, const Module* ) const { 207 O << "********** INTERVALS **********\n"; 208 for (const_iterator I = begin(), E = end(); I != E; ++I) { 209 I->second.print(std::cerr, mri_); 210 std::cerr << "\n"; 211 } 212 213 O << "********** MACHINEINSTRS **********\n"; 214 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end(); 215 mbbi != mbbe; ++mbbi) { 216 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n"; 217 for (MachineBasicBlock::iterator mii = mbbi->begin(), 218 mie = mbbi->end(); mii != mie; ++mii) { 219 O << getInstructionIndex(mii) << '\t' << *mii; 220 } 221 } 222} 223 224std::vector<LiveInterval*> LiveIntervals:: 225addIntervalsForSpills(const LiveInterval &li, VirtRegMap &vrm, int slot) { 226 // since this is called after the analysis is done we don't know if 227 // LiveVariables is available 228 lv_ = getAnalysisToUpdate<LiveVariables>(); 229 230 std::vector<LiveInterval*> added; 231 232 assert(li.weight != HUGE_VAL && 233 "attempt to spill already spilled interval!"); 234 235 DEBUG(std::cerr << "\t\t\t\tadding intervals for spills for interval: " 236 << li << '\n'); 237 238 const TargetRegisterClass* rc = mf_->getSSARegMap()->getRegClass(li.reg); 239 240 for (LiveInterval::Ranges::const_iterator 241 i = li.ranges.begin(), e = li.ranges.end(); i != e; ++i) { 242 unsigned index = getBaseIndex(i->start); 243 unsigned end = getBaseIndex(i->end-1) + InstrSlots::NUM; 244 for (; index != end; index += InstrSlots::NUM) { 245 // skip deleted instructions 246 while (index != end && !getInstructionFromIndex(index)) 247 index += InstrSlots::NUM; 248 if (index == end) break; 249 250 MachineBasicBlock::iterator mi = getInstructionFromIndex(index); 251 252 // NewRegLiveIn - This instruction might have multiple uses of the spilled 253 // register. In this case, for the first use, keep track of the new vreg 254 // that we reload it into. 
If we see a second use, reuse this vreg 255 // instead of creating live ranges for two reloads. 256 unsigned NewRegLiveIn = 0; 257 258 for_operand: 259 for (unsigned i = 0; i != mi->getNumOperands(); ++i) { 260 MachineOperand& mop = mi->getOperand(i); 261 if (mop.isRegister() && mop.getReg() == li.reg) { 262 if (NewRegLiveIn && mop.isUse()) { 263 // We already emitted a reload of this value, reuse it for 264 // subsequent operands. 265 mi->SetMachineOperandReg(i, NewRegLiveIn); 266 DEBUG(std::cerr << "\t\t\t\treused reload into reg" << NewRegLiveIn 267 << " for operand #" << i << '\n'); 268 } else if (MachineInstr* fmi = mri_->foldMemoryOperand(mi, i, slot)) { 269 // Attempt to fold the memory reference into the instruction. If we 270 // can do this, we don't need to insert spill code. 271 if (lv_) 272 lv_->instructionChanged(mi, fmi); 273 vrm.virtFolded(li.reg, mi, i, fmi); 274 mi2iMap_.erase(mi); 275 i2miMap_[index/InstrSlots::NUM] = fmi; 276 mi2iMap_[fmi] = index; 277 MachineBasicBlock &MBB = *mi->getParent(); 278 mi = MBB.insert(MBB.erase(mi), fmi); 279 ++numFolded; 280 281 // Folding the load/store can completely change the instruction in 282 // unpredictable ways, rescan it from the beginning. 283 goto for_operand; 284 } else { 285 // This is tricky. We need to add information in the interval about 286 // the spill code so we have to use our extra load/store slots. 287 // 288 // If we have a use we are going to have a load so we start the 289 // interval from the load slot onwards. Otherwise we start from the 290 // def slot. 291 unsigned start = (mop.isUse() ? 292 getLoadIndex(index) : 293 getDefIndex(index)); 294 // If we have a def we are going to have a store right after it so 295 // we end the interval after the use of the next 296 // instruction. Otherwise we end after the use of this instruction. 297 unsigned end = 1 + (mop.isDef() ? 
298 getStoreIndex(index) : 299 getUseIndex(index)); 300 301 // create a new register for this spill 302 NewRegLiveIn = mf_->getSSARegMap()->createVirtualRegister(rc); 303 mi->SetMachineOperandReg(i, NewRegLiveIn); 304 vrm.grow(); 305 vrm.assignVirt2StackSlot(NewRegLiveIn, slot); 306 LiveInterval& nI = getOrCreateInterval(NewRegLiveIn); 307 assert(nI.empty()); 308 309 // the spill weight is now infinity as it 310 // cannot be spilled again 311 nI.weight = float(HUGE_VAL); 312 LiveRange LR(start, end, nI.getNextValue()); 313 DEBUG(std::cerr << " +" << LR); 314 nI.addRange(LR); 315 added.push_back(&nI); 316 317 // update live variables if it is available 318 if (lv_) 319 lv_->addVirtualRegisterKilled(NewRegLiveIn, mi); 320 321 // If this is a live in, reuse it for subsequent live-ins. If it's 322 // a def, we can't do this. 323 if (!mop.isUse()) NewRegLiveIn = 0; 324 325 DEBUG(std::cerr << "\t\t\t\tadded new interval: " << nI << '\n'); 326 } 327 } 328 } 329 } 330 } 331 332 return added; 333} 334 335void LiveIntervals::printRegName(unsigned reg) const 336{ 337 if (MRegisterInfo::isPhysicalRegister(reg)) 338 std::cerr << mri_->getName(reg); 339 else 340 std::cerr << "%reg" << reg; 341} 342 343void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock* mbb, 344 MachineBasicBlock::iterator mi, 345 LiveInterval& interval) 346{ 347 DEBUG(std::cerr << "\t\tregister: "; printRegName(interval.reg)); 348 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg); 349 350 // Virtual registers may be defined multiple times (due to phi 351 // elimination and 2-addr elimination). Much of what we do only has to be 352 // done once for the vreg. We use an empty interval to detect the first 353 // time we see a vreg. 354 if (interval.empty()) { 355 // Get the Idx of the defining instructions. 
    unsigned defIndex = getDefIndex(getInstructionIndex(mi));

    unsigned ValNum = interval.getNextValue();
    assert(ValNum == 0 && "First value in interval is not 0?");
    ValNum = 0;  // Clue in the optimizer.

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;  // killed by its own def => unit-length range

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNum);
        interval.addRange(LR);
        DEBUG(std::cerr << " +" << LR << "\n");
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex,
                    getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
                    ValNum);
    DEBUG(std::cerr << " +" << NewLR);
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
      if (vi.AliveBlocks[i]) {
        // AliveBlocks is indexed by MBB number.
        MachineBasicBlock* mbb = mf_->getBlockNumbered(i);
        if (!mbb->empty()) {
          LiveRange LR(getInstructionIndex(&mbb->front()),
                       getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
                       ValNum);
          interval.addRange(LR);
          DEBUG(std::cerr << " +" << LR);
        }
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      LiveRange LR(getInstructionIndex(Kill->getParent()->begin()),
                   getUseIndex(getInstructionIndex(Kill))+1,
                   ValNum);
      interval.addRange(LR);
      DEBUG(std::cerr << " +" << LR);
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is the first
    // operand, and is a def-and-use.
    if (mi->getOperand(0).isRegister() &&
        mi->getOperand(0).getReg() == interval.reg &&
        mi->getOperand(0).isDef() && mi->getOperand(0).isUse()) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      unsigned DefIndex = getDefIndex(getInstructionIndex(vi.DefInst));
      unsigned RedefIndex = getDefIndex(getInstructionIndex(mi));

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Re-insert the first half with a fresh value number.
      LiveRange LR(DefIndex, RedefIndex, interval.getNextValue());
      DEBUG(std::cerr << " replace range with " << LR);
      interval.addRange(LR);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (lv_->RegisterDefIsDead(mi, interval.reg))
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, 0));

      DEBUG(std::cerr << "RESULT: " << interval);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getInstructionIndex(Killer->getParent()->begin());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DEBUG(std::cerr << "Removing [" << Start << "," << End << "] from: "
              << interval << "\n");
        interval.removeRange(Start, End);
        DEBUG(std::cerr << "RESULT: " << interval);

        // Replace the interval with one of a NEW value number.
        LiveRange LR(Start, End, interval.getNextValue());
        DEBUG(std::cerr << " replace range with " << LR);
        interval.addRange(LR);
        DEBUG(std::cerr << "RESULT: " << interval);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(getInstructionIndex(mi));
      LiveRange LR(defIndex,
                   getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
                   interval.getNextValue());
      interval.addRange(LR);
      DEBUG(std::cerr << " +" << LR);
    }
  }

  DEBUG(std::cerr << '\n');
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              LiveInterval& interval,
                                              unsigned SrcReg, unsigned DestReg,
                                              bool isLiveIn)
{
  // A physical register cannot be live across basic block, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG(std::cerr << "\t\tregister: "; printRegName(interval.reg));
  // NOTE(review): KillIter appears unused in this function — candidate for
  // removal.
  typedef LiveVariables::killed_iterator KillIter;

  unsigned baseIndex = getInstructionIndex(mi);
  unsigned start = getDefIndex(baseIndex);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (lv_->RegisterDefIsDead(mi, interval.reg)) {
    DEBUG(std::cerr << " dead");
    // NOTE(review): 'start' is already a def index, so getDefIndex is being
    // applied twice here — presumably idempotent; verify.
    end = getDefIndex(start) + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  while (++mi != MBB->end()) {
    baseIndex += InstrSlots::NUM;
    if (lv_->KillsRegister(mi, interval.reg)) {
      DEBUG(std::cerr << " killed");
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    }
  }

  // The only case we should have a dead physreg here without a killing or
  // instruction where we know it's dead is if it is live-in to the function
  // and never used.
  assert(isLiveIn && "physreg was not killed in defining block!");
  end = getDefIndex(start) + 1;  // It's dead.
534 535exit: 536 assert(start < end && "did not find end of interval?"); 537 538 // Finally, if this is defining a new range for the physical register, and if 539 // that physreg is just a copy from a vreg, and if THAT vreg was a copy from 540 // the physreg, then the new fragment has the same value as the one copied 541 // into the vreg. 542 if (interval.reg == DestReg && !interval.empty() && 543 MRegisterInfo::isVirtualRegister(SrcReg)) { 544 545 // Get the live interval for the vreg, see if it is defined by a copy. 546 LiveInterval &SrcInterval = getOrCreateInterval(SrcReg); 547 548 if (SrcInterval.containsOneValue()) { 549 assert(!SrcInterval.empty() && "Can't contain a value and be empty!"); 550 551 // Get the first index of the first range. Though the interval may have 552 // multiple liveranges in it, we only check the first. 553 unsigned StartIdx = SrcInterval.begin()->start; 554 MachineInstr *SrcDefMI = getInstructionFromIndex(StartIdx); 555 556 // Check to see if the vreg was defined by a copy instruction, and that 557 // the source was this physreg. 558 unsigned VRegSrcSrc, VRegSrcDest; 559 if (tii_->isMoveInstr(*SrcDefMI, VRegSrcSrc, VRegSrcDest) && 560 SrcReg == VRegSrcDest && VRegSrcSrc == DestReg) { 561 // Okay, now we know that the vreg was defined by a copy from this 562 // physreg. Find the value number being copied and use it as the value 563 // for this range. 
564 const LiveRange *DefRange = interval.getLiveRangeContaining(StartIdx-1); 565 if (DefRange) { 566 LiveRange LR(start, end, DefRange->ValId); 567 interval.addRange(LR); 568 DEBUG(std::cerr << " +" << LR << '\n'); 569 return; 570 } 571 } 572 } 573 } 574 575 576 LiveRange LR(start, end, interval.getNextValue()); 577 interval.addRange(LR); 578 DEBUG(std::cerr << " +" << LR << '\n'); 579} 580 581void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB, 582 MachineBasicBlock::iterator MI, 583 unsigned reg) { 584 if (MRegisterInfo::isVirtualRegister(reg)) 585 handleVirtualRegisterDef(MBB, MI, getOrCreateInterval(reg)); 586 else if (allocatableRegs_[reg]) { 587 unsigned SrcReg = 0, DestReg = 0; 588 bool IsMove = tii_->isMoveInstr(*MI, SrcReg, DestReg); 589 590 handlePhysicalRegisterDef(MBB, MI, getOrCreateInterval(reg), 591 SrcReg, DestReg); 592 for (const unsigned* AS = mri_->getAliasSet(reg); *AS; ++AS) 593 handlePhysicalRegisterDef(MBB, MI, getOrCreateInterval(*AS), 594 SrcReg, DestReg); 595 } 596} 597 598/// computeIntervals - computes the live intervals for virtual 599/// registers. 
for some ordering of the machine instructions [1,N] a 600/// live interval is an interval [i, j) where 1 <= i <= j < N for 601/// which a variable is live 602void LiveIntervals::computeIntervals() 603{ 604 DEBUG(std::cerr << "********** COMPUTING LIVE INTERVALS **********\n"); 605 DEBUG(std::cerr << "********** Function: " 606 << ((Value*)mf_->getFunction())->getName() << '\n'); 607 bool IgnoreFirstInstr = mf_->livein_begin() != mf_->livein_end(); 608 609 for (MachineFunction::iterator I = mf_->begin(), E = mf_->end(); 610 I != E; ++I) { 611 MachineBasicBlock* mbb = I; 612 DEBUG(std::cerr << ((Value*)mbb->getBasicBlock())->getName() << ":\n"); 613 614 MachineBasicBlock::iterator mi = mbb->begin(), miEnd = mbb->end(); 615 if (IgnoreFirstInstr) { ++mi; IgnoreFirstInstr = false; } 616 for (; mi != miEnd; ++mi) { 617 const TargetInstrDescriptor& tid = 618 tm_->getInstrInfo()->get(mi->getOpcode()); 619 DEBUG(std::cerr << getInstructionIndex(mi) << "\t" << *mi); 620 621 // handle implicit defs 622 for (const unsigned* id = tid.ImplicitDefs; *id; ++id) 623 handleRegisterDef(mbb, mi, *id); 624 625 // handle explicit defs 626 for (int i = mi->getNumOperands() - 1; i >= 0; --i) { 627 MachineOperand& mop = mi->getOperand(i); 628 // handle register defs - build intervals 629 if (mop.isRegister() && mop.getReg() && mop.isDef()) 630 handleRegisterDef(mbb, mi, mop.getReg()); 631 } 632 } 633 } 634} 635 636/// IntA is defined as a copy from IntB and we know it only has one value 637/// number. If all of the places that IntA and IntB overlap are defined by 638/// copies from IntA to IntB, we know that these two ranges can really be 639/// merged if we adjust the value numbers. If it is safe, adjust the value 640/// numbers and return true, allowing coalescing to occur. 
bool LiveIntervals::
AdjustIfAllOverlappingRangesAreCopiesFrom(LiveInterval &IntA,
                                          LiveInterval &IntB,
                                          unsigned CopyIdx) {
  std::vector<LiveRange*> Ranges;
  IntA.getOverlapingRanges(IntB, CopyIdx, Ranges);

  assert(!Ranges.empty() && "Why didn't we do a simple join of this?");

  unsigned IntBRep = rep(IntB.reg);

  // Check to see if all of the overlaps (entries in Ranges) are defined by a
  // copy from IntA.  If not, exit.
  for (unsigned i = 0, e = Ranges.size(); i != e; ++i) {
    unsigned Idx = Ranges[i]->start;
    // The instruction at the range's start index is its defining instruction.
    MachineInstr *MI = getInstructionFromIndex(Idx);
    unsigned SrcReg, DestReg;
    if (!tii_->isMoveInstr(*MI, SrcReg, DestReg)) return false;

    // If this copy isn't actually defining this range, it must be a live
    // range spanning basic blocks or something.
    if (rep(DestReg) != rep(IntA.reg)) return false;

    // Check to see if this is coming from IntB.  If not, bail out.
    if (rep(SrcReg) != IntBRep) return false;
  }

  // Okay, we can change this one.  Get the IntB value number that IntA is
  // copied from (the range live just before the copy instruction).
  unsigned ActualValNo = IntA.getLiveRangeContaining(CopyIdx-1)->ValId;

  // Change all of the value numbers to the same value number IntA is
  // copied from.
  for (unsigned i = 0, e = Ranges.size(); i != e; ++i)
    Ranges[i]->ValId = ActualValNo;

  return true;
}

// Attempt to coalesce the source/dest intervals of every move instruction
// in the given block.
void LiveIntervals::joinIntervalsInMachineBB(MachineBasicBlock *MBB) {
  DEBUG(std::cerr << ((Value*)MBB->getBasicBlock())->getName() << ":\n");

  for (MachineBasicBlock::iterator mi = MBB->begin(), mie = MBB->end();
       mi != mie; ++mi) {
    DEBUG(std::cerr << getInstructionIndex(mi) << '\t' << *mi);

    // we only join virtual registers with allocatable
    // physical registers since we do not have liveness information
    // on not allocatable physical registers
    unsigned SrcReg, DestReg;
    if (tii_->isMoveInstr(*mi, SrcReg, DestReg) &&
        (MRegisterInfo::isVirtualRegister(SrcReg) || allocatableRegs_[SrcReg])&&
        (MRegisterInfo::isVirtualRegister(DestReg)||allocatableRegs_[DestReg])){

      // Get representative registers.
      SrcReg = rep(SrcReg);
      DestReg = rep(DestReg);

      // If they are already joined we continue.
      if (SrcReg == DestReg)
        continue;

      // If they are both physical registers, we cannot join them.
      if (MRegisterInfo::isPhysicalRegister(SrcReg) &&
          MRegisterInfo::isPhysicalRegister(DestReg))
        continue;

      // If they are not of the same register class, we cannot join them.
      if (differingRegisterClasses(SrcReg, DestReg))
        continue;

      LiveInterval &SrcInt = getInterval(SrcReg);
      LiveInterval &DestInt = getInterval(DestReg);
      assert(SrcInt.reg == SrcReg && DestInt.reg == DestReg &&
             "Register mapping is horribly broken!");

      DEBUG(std::cerr << "\t\tInspecting " << SrcInt << " and " << DestInt
            << ": ");

      // If two intervals contain a single value and are joined by a copy, it
      // does not matter if the intervals overlap, they can always be joined.
      bool Joinable = SrcInt.containsOneValue() && DestInt.containsOneValue();

      unsigned MIDefIdx = getDefIndex(getInstructionIndex(mi));

      // If the intervals think that this is joinable, do so now.
      if (!Joinable && DestInt.joinable(SrcInt, MIDefIdx))
        Joinable = true;

      // If DestInt is actually a copy from SrcInt (which we know) that is used
      // to define another value of SrcInt, we can change the other range of
      // SrcInt to be the value of the range that defines DestInt, allowing a
      // coalesce.
      if (!Joinable && DestInt.containsOneValue() &&
          AdjustIfAllOverlappingRangesAreCopiesFrom(SrcInt, DestInt, MIDefIdx))
        Joinable = true;

      if (!Joinable || overlapsAliases(&SrcInt, &DestInt)) {
        DEBUG(std::cerr << "Interference!\n");
      } else {
        DestInt.join(SrcInt, MIDefIdx);
        DEBUG(std::cerr << "Joined.  Result = " << DestInt << "\n");

        if (!MRegisterInfo::isPhysicalRegister(SrcReg)) {
          // The vreg now maps to the surviving register.
          r2iMap_.erase(SrcReg);
          r2rMap_[SrcReg] = DestReg;
        } else {
          // Otherwise merge the data structures the other way so we don't lose
          // the physreg information.
          r2rMap_[DestReg] = SrcReg;
          DestInt.reg = SrcReg;
          SrcInt.swap(DestInt);
          r2iMap_.erase(DestReg);
        }
        ++numJoins;
      }
    }
  }
}

namespace {
  // DepthMBBCompare - Comparison predicate that sort first based on the loop
  // depth of the basic block (the unsigned), and then on the MBB number.
  struct DepthMBBCompare {
    typedef std::pair<unsigned, MachineBasicBlock*> DepthMBBPair;
    bool operator()(const DepthMBBPair &LHS, const DepthMBBPair &RHS) const {
      if (LHS.first > RHS.first) return true;   // Deeper loops first
      // Equal depth: order by MBB number for a deterministic sort.
      return LHS.first == RHS.first &&
        LHS.second->getNumber() < RHS.second->getNumber();
    }
  };
}

// Coalesce copy-related intervals across the whole function, visiting the
// most deeply nested blocks first so inner-loop copies are joined first.
void LiveIntervals::joinIntervals() {
  DEBUG(std::cerr << "********** JOINING INTERVALS ***********\n");

  const LoopInfo &LI = getAnalysis<LoopInfo>();
  if (LI.begin() == LI.end()) {
    // If there are no loops in the function, join intervals in function order.
    for (MachineFunction::iterator I = mf_->begin(), E = mf_->end();
         I != E; ++I)
      joinIntervalsInMachineBB(I);
  } else {
    // Otherwise, join intervals in inner loops before other intervals.
    // Unfortunately we can't just iterate over loop hierarchy here because
    // there may be more MBB's than BB's.  Collect MBB's for sorting.
    std::vector<std::pair<unsigned, MachineBasicBlock*> > MBBs;
    for (MachineFunction::iterator I = mf_->begin(), E = mf_->end();
         I != E; ++I)
      MBBs.push_back(std::make_pair(LI.getLoopDepth(I->getBasicBlock()), I));

    // Sort by loop depth.
    std::sort(MBBs.begin(), MBBs.end(), DepthMBBCompare());

    // Finally, join intervals in loop nest order.
    for (unsigned i = 0, e = MBBs.size(); i != e; ++i)
      joinIntervalsInMachineBB(MBBs[i].second);
  }

  DEBUG(std::cerr << "*** Register mapping ***\n");
  DEBUG(for (int i = 0, e = r2rMap_.size(); i != e; ++i)
        if (r2rMap_[i])
          std::cerr << "   reg " << i << " -> reg " << r2rMap_[i] << "\n");
}

/// Return true if the two specified registers belong to different register
/// classes.  The registers may be either phys or virt regs.
bool LiveIntervals::differingRegisterClasses(unsigned RegA,
                                             unsigned RegB) const {

  // Get the register classes for the first reg.
  if (MRegisterInfo::isPhysicalRegister(RegA)) {
    assert(MRegisterInfo::isVirtualRegister(RegB) &&
           "Shouldn't consider two physregs!");
    return !mf_->getSSARegMap()->getRegClass(RegB)->contains(RegA);
  }

  // Compare against the regclass for the second reg.
  const TargetRegisterClass *RegClass = mf_->getSSARegMap()->getRegClass(RegA);
  if (MRegisterInfo::isVirtualRegister(RegB))
    return RegClass != mf_->getSSARegMap()->getRegClass(RegB);
  else
    return !RegClass->contains(RegB);
}

// Return true if RHS's interval overlaps any interval aliased to the
// physreg side of the pair.  Exactly one of the two must be a physreg
// interval; LHS/RHS are swapped locally so LHS is the physreg one.
bool LiveIntervals::overlapsAliases(const LiveInterval *LHS,
                                    const LiveInterval *RHS) const {
  if (!MRegisterInfo::isPhysicalRegister(LHS->reg)) {
    if (!MRegisterInfo::isPhysicalRegister(RHS->reg))
      return false;  // vreg-vreg merge has no aliases!
    std::swap(LHS, RHS);
  }

  assert(MRegisterInfo::isPhysicalRegister(LHS->reg) &&
         MRegisterInfo::isVirtualRegister(RHS->reg) &&
         "first interval must describe a physical register");

  for (const unsigned *AS = mri_->getAliasSet(LHS->reg); *AS; ++AS)
    if (RHS->overlaps(getInterval(*AS)))
      return true;

  return false;
}

// Fresh interval for 'reg': physregs start with infinite spill weight
// (they can never be spilled), vregs start at zero.
LiveInterval LiveIntervals::createInterval(unsigned reg) {
  float Weight = MRegisterInfo::isPhysicalRegister(reg) ?
    (float)HUGE_VAL : 0.0F;
  return LiveInterval(reg, Weight);
}