LiveIntervalAnalysis.cpp revision c4118452bcdae305088e151719abb2a3530caede
1//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file implements the LiveInterval analysis pass which is used 11// by the Linear Scan Register allocator. This pass linearizes the 12// basic blocks of the function in DFS order and uses the 13// LiveVariables pass to conservatively compute live intervals for 14// each virtual and physical register. 15// 16//===----------------------------------------------------------------------===// 17 18#define DEBUG_TYPE "regalloc" 19#include "llvm/CodeGen/LiveIntervalAnalysis.h" 20#include "llvm/Value.h" 21#include "llvm/Analysis/AliasAnalysis.h" 22#include "llvm/CodeGen/LiveVariables.h" 23#include "llvm/CodeGen/MachineDominators.h" 24#include "llvm/CodeGen/MachineInstr.h" 25#include "llvm/CodeGen/MachineRegisterInfo.h" 26#include "llvm/CodeGen/Passes.h" 27#include "llvm/Target/TargetRegisterInfo.h" 28#include "llvm/Target/TargetInstrInfo.h" 29#include "llvm/Target/TargetMachine.h" 30#include "llvm/Support/Debug.h" 31#include "llvm/Support/ErrorHandling.h" 32#include "llvm/Support/raw_ostream.h" 33#include "llvm/ADT/DenseSet.h" 34#include "llvm/ADT/Statistic.h" 35#include "llvm/ADT/STLExtras.h" 36#include "LiveRangeCalc.h" 37#include <algorithm> 38#include <limits> 39#include <cmath> 40using namespace llvm; 41 42STATISTIC(numIntervals , "Number of original intervals"); 43 44char LiveIntervals::ID = 0; 45INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals", 46 "Live Interval Analysis", false, false) 47INITIALIZE_AG_DEPENDENCY(AliasAnalysis) 48INITIALIZE_PASS_DEPENDENCY(LiveVariables) 49INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree) 50INITIALIZE_PASS_DEPENDENCY(SlotIndexes) 51INITIALIZE_PASS_END(LiveIntervals, "liveintervals", 52 "Live Interval Analysis", false, false) 53 54void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const { 55 AU.setPreservesCFG(); 56 AU.addRequired<AliasAnalysis>(); 57 AU.addPreserved<AliasAnalysis>(); 58 AU.addRequired<LiveVariables>(); 59 AU.addPreserved<LiveVariables>(); 60 AU.addPreservedID(MachineLoopInfoID); 61 AU.addRequiredTransitiveID(MachineDominatorsID); 62 AU.addPreservedID(MachineDominatorsID); 63 AU.addPreserved<SlotIndexes>(); 64 AU.addRequiredTransitive<SlotIndexes>(); 65 MachineFunctionPass::getAnalysisUsage(AU); 66} 67 68LiveIntervals::LiveIntervals() : MachineFunctionPass(ID), 69 DomTree(0), LRCalc(0) { 70 initializeLiveIntervalsPass(*PassRegistry::getPassRegistry()); 71} 72 73LiveIntervals::~LiveIntervals() { 74 delete LRCalc; 75} 76 77void LiveIntervals::releaseMemory() { 78 // Free the live intervals themselves. 79 for (DenseMap<unsigned, LiveInterval*>::iterator I = R2IMap.begin(), 80 E = R2IMap.end(); I != E; ++I) 81 delete I->second; 82 83 R2IMap.clear(); 84 RegMaskSlots.clear(); 85 RegMaskBits.clear(); 86 RegMaskBlocks.clear(); 87 88 for (unsigned i = 0, e = RegUnitIntervals.size(); i != e; ++i) 89 delete RegUnitIntervals[i]; 90 RegUnitIntervals.clear(); 91 92 // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd. 93 VNInfoAllocator.Reset(); 94} 95 96/// runOnMachineFunction - Register allocate the whole function 97/// 98bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) { 99 MF = &fn; 100 MRI = &MF->getRegInfo(); 101 TM = &fn.getTarget(); 102 TRI = TM->getRegisterInfo(); 103 TII = TM->getInstrInfo(); 104 AA = &getAnalysis<AliasAnalysis>(); 105 LV = &getAnalysis<LiveVariables>(); 106 Indexes = &getAnalysis<SlotIndexes>(); 107 DomTree = &getAnalysis<MachineDominatorTree>(); 108 if (!LRCalc) 109 LRCalc = new LiveRangeCalc(); 110 AllocatableRegs = TRI->getAllocatableSet(fn); 111 ReservedRegs = TRI->getReservedRegs(fn); 112 113 computeIntervals(); 114 115 numIntervals += getNumIntervals(); 116 117 computeLiveInRegUnits(); 118 119 DEBUG(dump()); 120 return true; 121} 122 123/// print - Implement the dump method. 124void LiveIntervals::print(raw_ostream &OS, const Module* ) const { 125 OS << "********** INTERVALS **********\n"; 126 127 // Dump the physregs. 128 for (unsigned Reg = 1, RegE = TRI->getNumRegs(); Reg != RegE; ++Reg) 129 if (const LiveInterval *LI = R2IMap.lookup(Reg)) 130 OS << PrintReg(Reg, TRI) << '\t' << *LI << '\n'; 131 132 // Dump the regunits. 133 for (unsigned i = 0, e = RegUnitIntervals.size(); i != e; ++i) 134 if (LiveInterval *LI = RegUnitIntervals[i]) 135 OS << PrintRegUnit(i, TRI) << " = " << *LI << '\n'; 136 137 // Dump the virtregs. 138 for (unsigned Reg = 0, RegE = MRI->getNumVirtRegs(); Reg != RegE; ++Reg) 139 if (const LiveInterval *LI = 140 R2IMap.lookup(TargetRegisterInfo::index2VirtReg(Reg))) 141 OS << PrintReg(LI->reg) << '\t' << *LI << '\n'; 142 143 printInstrs(OS); 144} 145 146void LiveIntervals::printInstrs(raw_ostream &OS) const { 147 OS << "********** MACHINEINSTRS **********\n"; 148 MF->print(OS, Indexes); 149} 150 151void LiveIntervals::dumpInstrs() const { 152 printInstrs(dbgs()); 153} 154 155static 156bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) { 157 unsigned Reg = MI.getOperand(MOIdx).getReg(); 158 for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) { 159 const MachineOperand &MO = MI.getOperand(i); 160 if (!MO.isReg()) 161 continue; 162 if (MO.getReg() == Reg && MO.isDef()) { 163 assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() && 164 MI.getOperand(MOIdx).getSubReg() && 165 (MO.getSubReg() || MO.isImplicit())); 166 return true; 167 } 168 } 169 return false; 170} 171 172/// isPartialRedef - Return true if the specified def at the specific index is 173/// partially re-defining the specified live interval. A common case of this is 174/// a definition of the sub-register. 175bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO, 176 LiveInterval &interval) { 177 if (!MO.getSubReg() || MO.isEarlyClobber()) 178 return false; 179 180 SlotIndex RedefIndex = MIIdx.getRegSlot(); 181 const LiveRange *OldLR = 182 interval.getLiveRangeContaining(RedefIndex.getRegSlot(true)); 183 MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def); 184 if (DefMI != 0) { 185 return DefMI->findRegisterDefOperandIdx(interval.reg) != -1; 186 } 187 return false; 188} 189 190void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb, 191 MachineBasicBlock::iterator mi, 192 SlotIndex MIIdx, 193 MachineOperand& MO, 194 unsigned MOIdx, 195 LiveInterval &interval) { 196 DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, TRI)); 197 198 // Virtual registers may be defined multiple times (due to phi 199 // elimination and 2-addr elimination). Much of what we do only has to be 200 // done once for the vreg. We use an empty interval to detect the first 201 // time we see a vreg. 202 LiveVariables::VarInfo& vi = LV->getVarInfo(interval.reg); 203 if (interval.empty()) { 204 // Get the Idx of the defining instructions. 205 SlotIndex defIndex = MIIdx.getRegSlot(MO.isEarlyClobber()); 206 207 // Make sure the first definition is not a partial redefinition. 208 assert(!MO.readsReg() && "First def cannot also read virtual register " 209 "missing <undef> flag?"); 210 211 VNInfo *ValNo = interval.getNextValue(defIndex, VNInfoAllocator); 212 assert(ValNo->id == 0 && "First value in interval is not 0?"); 213 214 // Loop over all of the blocks that the vreg is defined in. There are 215 // two cases we have to handle here. The most common case is a vreg 216 // whose lifetime is contained within a basic block. In this case there 217 // will be a single kill, in MBB, which comes after the definition. 218 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) { 219 // FIXME: what about dead vars? 220 SlotIndex killIdx; 221 if (vi.Kills[0] != mi) 222 killIdx = getInstructionIndex(vi.Kills[0]).getRegSlot(); 223 else 224 killIdx = defIndex.getDeadSlot(); 225 226 // If the kill happens after the definition, we have an intra-block 227 // live range. 228 if (killIdx > defIndex) { 229 assert(vi.AliveBlocks.empty() && 230 "Shouldn't be alive across any blocks!"); 231 LiveRange LR(defIndex, killIdx, ValNo); 232 interval.addRange(LR); 233 DEBUG(dbgs() << " +" << LR << "\n"); 234 return; 235 } 236 } 237 238 // The other case we handle is when a virtual register lives to the end 239 // of the defining block, potentially live across some blocks, then is 240 // live into some number of blocks, but gets killed. Start by adding a 241 // range that goes from this definition to the end of the defining block. 242 LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo); 243 DEBUG(dbgs() << " +" << NewLR); 244 interval.addRange(NewLR); 245 246 bool PHIJoin = LV->isPHIJoin(interval.reg); 247 248 if (PHIJoin) { 249 // A phi join register is killed at the end of the MBB and revived as a 250 // new valno in the killing blocks. 251 assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks"); 252 DEBUG(dbgs() << " phi-join"); 253 ValNo->setHasPHIKill(true); 254 } else { 255 // Iterate over all of the blocks that the variable is completely 256 // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the 257 // live interval. 258 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(), 259 E = vi.AliveBlocks.end(); I != E; ++I) { 260 MachineBasicBlock *aliveBlock = MF->getBlockNumbered(*I); 261 LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), 262 ValNo); 263 interval.addRange(LR); 264 DEBUG(dbgs() << " +" << LR); 265 } 266 } 267 268 // Finally, this virtual register is live from the start of any killing 269 // block to the 'use' slot of the killing instruction. 270 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) { 271 MachineInstr *Kill = vi.Kills[i]; 272 SlotIndex Start = getMBBStartIdx(Kill->getParent()); 273 SlotIndex killIdx = getInstructionIndex(Kill).getRegSlot(); 274 275 // Create interval with one of a NEW value number. Note that this value 276 // number isn't actually defined by an instruction, weird huh? :) 277 if (PHIJoin) { 278 assert(getInstructionFromIndex(Start) == 0 && 279 "PHI def index points at actual instruction."); 280 ValNo = interval.getNextValue(Start, VNInfoAllocator); 281 ValNo->setIsPHIDef(true); 282 } 283 LiveRange LR(Start, killIdx, ValNo); 284 interval.addRange(LR); 285 DEBUG(dbgs() << " +" << LR); 286 } 287 288 } else { 289 if (MultipleDefsBySameMI(*mi, MOIdx)) 290 // Multiple defs of the same virtual register by the same instruction. 291 // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ... 292 // This is likely due to elimination of REG_SEQUENCE instructions. Return 293 // here since there is nothing to do. 294 return; 295 296 // If this is the second time we see a virtual register definition, it 297 // must be due to phi elimination or two addr elimination. If this is 298 // the result of two address elimination, then the vreg is one of the 299 // def-and-use register operand. 300 301 // It may also be partial redef like this: 302 // 80 %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0 303 // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0 304 bool PartReDef = isPartialRedef(MIIdx, MO, interval); 305 if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) { 306 // If this is a two-address definition, then we have already processed 307 // the live range. The only problem is that we didn't realize there 308 // are actually two values in the live interval. Because of this we 309 // need to take the LiveRegion that defines this register and split it 310 // into two values. 311 SlotIndex RedefIndex = MIIdx.getRegSlot(MO.isEarlyClobber()); 312 313 const LiveRange *OldLR = 314 interval.getLiveRangeContaining(RedefIndex.getRegSlot(true)); 315 VNInfo *OldValNo = OldLR->valno; 316 SlotIndex DefIndex = OldValNo->def.getRegSlot(); 317 318 // Delete the previous value, which should be short and continuous, 319 // because the 2-addr copy must be in the same MBB as the redef. 320 interval.removeRange(DefIndex, RedefIndex); 321 322 // The new value number (#1) is defined by the instruction we claimed 323 // defined value #0. 324 VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator); 325 326 // Value#0 is now defined by the 2-addr instruction. 327 OldValNo->def = RedefIndex; 328 329 // Add the new live interval which replaces the range for the input copy. 330 LiveRange LR(DefIndex, RedefIndex, ValNo); 331 DEBUG(dbgs() << " replace range with " << LR); 332 interval.addRange(LR); 333 334 // If this redefinition is dead, we need to add a dummy unit live 335 // range covering the def slot. 336 if (MO.isDead()) 337 interval.addRange(LiveRange(RedefIndex, RedefIndex.getDeadSlot(), 338 OldValNo)); 339 340 DEBUG(dbgs() << " RESULT: " << interval); 341 } else if (LV->isPHIJoin(interval.reg)) { 342 // In the case of PHI elimination, each variable definition is only 343 // live until the end of the block. We've already taken care of the 344 // rest of the live range. 345 346 SlotIndex defIndex = MIIdx.getRegSlot(); 347 if (MO.isEarlyClobber()) 348 defIndex = MIIdx.getRegSlot(true); 349 350 VNInfo *ValNo = interval.getNextValue(defIndex, VNInfoAllocator); 351 352 SlotIndex killIndex = getMBBEndIdx(mbb); 353 LiveRange LR(defIndex, killIndex, ValNo); 354 interval.addRange(LR); 355 ValNo->setHasPHIKill(true); 356 DEBUG(dbgs() << " phi-join +" << LR); 357 } else { 358 llvm_unreachable("Multiply defined register"); 359 } 360 } 361 362 DEBUG(dbgs() << '\n'); 363} 364 365static bool isRegLiveIntoSuccessor(const MachineBasicBlock *MBB, unsigned Reg) { 366 for (MachineBasicBlock::const_succ_iterator SI = MBB->succ_begin(), 367 SE = MBB->succ_end(); 368 SI != SE; ++SI) { 369 const MachineBasicBlock* succ = *SI; 370 if (succ->isLiveIn(Reg)) 371 return true; 372 } 373 return false; 374} 375 376void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB, 377 MachineBasicBlock::iterator mi, 378 SlotIndex MIIdx, 379 MachineOperand& MO, 380 LiveInterval &interval) { 381 DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, TRI)); 382 383 SlotIndex baseIndex = MIIdx; 384 SlotIndex start = baseIndex.getRegSlot(MO.isEarlyClobber()); 385 SlotIndex end = start; 386 387 // If it is not used after definition, it is considered dead at 388 // the instruction defining it. Hence its interval is: 389 // [defSlot(def), defSlot(def)+1) 390 // For earlyclobbers, the defSlot was pushed back one; the extra 391 // advance below compensates. 392 if (MO.isDead()) { 393 DEBUG(dbgs() << " dead"); 394 end = start.getDeadSlot(); 395 goto exit; 396 } 397 398 // If it is not dead on definition, it must be killed by a 399 // subsequent instruction. Hence its interval is: 400 // [defSlot(def), useSlot(kill)+1) 401 baseIndex = baseIndex.getNextIndex(); 402 while (++mi != MBB->end()) { 403 404 if (mi->isDebugValue()) 405 continue; 406 if (getInstructionFromIndex(baseIndex) == 0) 407 baseIndex = Indexes->getNextNonNullIndex(baseIndex); 408 409 if (mi->killsRegister(interval.reg, TRI)) { 410 DEBUG(dbgs() << " killed"); 411 end = baseIndex.getRegSlot(); 412 goto exit; 413 } else { 414 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,TRI); 415 if (DefIdx != -1) { 416 if (mi->isRegTiedToUseOperand(DefIdx)) { 417 // Two-address instruction. 418 end = baseIndex.getRegSlot(mi->getOperand(DefIdx).isEarlyClobber()); 419 } else { 420 // Another instruction redefines the register before it is ever read. 421 // Then the register is essentially dead at the instruction that 422 // defines it. Hence its interval is: 423 // [defSlot(def), defSlot(def)+1) 424 DEBUG(dbgs() << " dead"); 425 end = start.getDeadSlot(); 426 } 427 goto exit; 428 } 429 } 430 431 baseIndex = baseIndex.getNextIndex(); 432 } 433 434 // If we get here the register *should* be live out. 435 assert(!isAllocatable(interval.reg) && "Physregs shouldn't be live out!"); 436 437 // FIXME: We need saner rules for reserved regs. 438 if (isReserved(interval.reg)) { 439 end = start.getDeadSlot(); 440 } else { 441 // Unreserved, unallocable registers like EFLAGS can be live across basic 442 // block boundaries. 443 assert(isRegLiveIntoSuccessor(MBB, interval.reg) && 444 "Unreserved reg not live-out?"); 445 end = getMBBEndIdx(MBB); 446 } 447exit: 448 assert(start < end && "did not find end of interval?"); 449 450 // Already exists? Extend old live interval. 451 VNInfo *ValNo = interval.getVNInfoAt(start); 452 bool Extend = ValNo != 0; 453 if (!Extend) 454 ValNo = interval.getNextValue(start, VNInfoAllocator); 455 LiveRange LR(start, end, ValNo); 456 interval.addRange(LR); 457 DEBUG(dbgs() << " +" << LR << '\n'); 458} 459 460void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB, 461 MachineBasicBlock::iterator MI, 462 SlotIndex MIIdx, 463 MachineOperand& MO, 464 unsigned MOIdx) { 465 if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) 466 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx, 467 getOrCreateInterval(MO.getReg())); 468 else 469 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO, 470 getOrCreateInterval(MO.getReg())); 471} 472 473void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB, 474 SlotIndex MIIdx, 475 LiveInterval &interval) { 476 assert(TargetRegisterInfo::isPhysicalRegister(interval.reg) && 477 "Only physical registers can be live in."); 478 assert((!isAllocatable(interval.reg) || MBB->getParent()->begin() || 479 MBB->isLandingPad()) && 480 "Allocatable live-ins only valid for entry blocks and landing pads."); 481 482 DEBUG(dbgs() << "\t\tlivein register: " << PrintReg(interval.reg, TRI)); 483 484 // Look for kills, if it reaches a def before it's killed, then it shouldn't 485 // be considered a livein. 486 MachineBasicBlock::iterator mi = MBB->begin(); 487 MachineBasicBlock::iterator E = MBB->end(); 488 // Skip over DBG_VALUE at the start of the MBB. 489 if (mi != E && mi->isDebugValue()) { 490 while (++mi != E && mi->isDebugValue()) 491 ; 492 if (mi == E) 493 // MBB is empty except for DBG_VALUE's. 494 return; 495 } 496 497 SlotIndex baseIndex = MIIdx; 498 SlotIndex start = baseIndex; 499 if (getInstructionFromIndex(baseIndex) == 0) 500 baseIndex = Indexes->getNextNonNullIndex(baseIndex); 501 502 SlotIndex end = baseIndex; 503 bool SeenDefUse = false; 504 505 while (mi != E) { 506 if (mi->killsRegister(interval.reg, TRI)) { 507 DEBUG(dbgs() << " killed"); 508 end = baseIndex.getRegSlot(); 509 SeenDefUse = true; 510 break; 511 } else if (mi->modifiesRegister(interval.reg, TRI)) { 512 // Another instruction redefines the register before it is ever read. 513 // Then the register is essentially dead at the instruction that defines 514 // it. Hence its interval is: 515 // [defSlot(def), defSlot(def)+1) 516 DEBUG(dbgs() << " dead"); 517 end = start.getDeadSlot(); 518 SeenDefUse = true; 519 break; 520 } 521 522 while (++mi != E && mi->isDebugValue()) 523 // Skip over DBG_VALUE. 524 ; 525 if (mi != E) 526 baseIndex = Indexes->getNextNonNullIndex(baseIndex); 527 } 528 529 // Live-in register might not be used at all. 530 if (!SeenDefUse) { 531 if (isAllocatable(interval.reg) || 532 !isRegLiveIntoSuccessor(MBB, interval.reg)) { 533 // Allocatable registers are never live through. 534 // Non-allocatable registers that aren't live into any successors also 535 // aren't live through. 536 DEBUG(dbgs() << " dead"); 537 return; 538 } else { 539 // If we get here the register is non-allocatable and live into some 540 // successor. We'll conservatively assume it's live-through. 541 DEBUG(dbgs() << " live through"); 542 end = getMBBEndIdx(MBB); 543 } 544 } 545 546 SlotIndex defIdx = getMBBStartIdx(MBB); 547 assert(getInstructionFromIndex(defIdx) == 0 && 548 "PHI def index points at actual instruction."); 549 VNInfo *vni = interval.getNextValue(defIdx, VNInfoAllocator); 550 vni->setIsPHIDef(true); 551 LiveRange LR(start, end, vni); 552 553 interval.addRange(LR); 554 DEBUG(dbgs() << " +" << LR << '\n'); 555} 556 557/// computeIntervals - computes the live intervals for virtual 558/// registers. for some ordering of the machine instructions [1,N] a 559/// live interval is an interval [i, j) where 1 <= i <= j < N for 560/// which a variable is live 561void LiveIntervals::computeIntervals() { 562 DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n" 563 << "********** Function: " 564 << ((Value*)MF->getFunction())->getName() << '\n'); 565 566 RegMaskBlocks.resize(MF->getNumBlockIDs()); 567 568 SmallVector<unsigned, 8> UndefUses; 569 for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end(); 570 MBBI != E; ++MBBI) { 571 MachineBasicBlock *MBB = MBBI; 572 RegMaskBlocks[MBB->getNumber()].first = RegMaskSlots.size(); 573 574 if (MBB->empty()) 575 continue; 576 577 // Track the index of the current machine instr. 578 SlotIndex MIIndex = getMBBStartIdx(MBB); 579 DEBUG(dbgs() << "BB#" << MBB->getNumber() 580 << ":\t\t# derived from " << MBB->getName() << "\n"); 581 582 // Create intervals for live-ins to this BB first. 583 for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(), 584 LE = MBB->livein_end(); LI != LE; ++LI) { 585 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI)); 586 } 587 588 // Skip over empty initial indices. 589 if (getInstructionFromIndex(MIIndex) == 0) 590 MIIndex = Indexes->getNextNonNullIndex(MIIndex); 591 592 for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end(); 593 MI != miEnd; ++MI) { 594 DEBUG(dbgs() << MIIndex << "\t" << *MI); 595 if (MI->isDebugValue()) 596 continue; 597 assert(Indexes->getInstructionFromIndex(MIIndex) == MI && 598 "Lost SlotIndex synchronization"); 599 600 // Handle defs. 601 for (int i = MI->getNumOperands() - 1; i >= 0; --i) { 602 MachineOperand &MO = MI->getOperand(i); 603 604 // Collect register masks. 605 if (MO.isRegMask()) { 606 RegMaskSlots.push_back(MIIndex.getRegSlot()); 607 RegMaskBits.push_back(MO.getRegMask()); 608 continue; 609 } 610 611 if (!MO.isReg() || !MO.getReg()) 612 continue; 613 614 // handle register defs - build intervals 615 if (MO.isDef()) 616 handleRegisterDef(MBB, MI, MIIndex, MO, i); 617 else if (MO.isUndef()) 618 UndefUses.push_back(MO.getReg()); 619 } 620 621 // Move to the next instr slot. 622 MIIndex = Indexes->getNextNonNullIndex(MIIndex); 623 } 624 625 // Compute the number of register mask instructions in this block. 626 std::pair<unsigned, unsigned> &RMB = RegMaskBlocks[MBB->getNumber()]; 627 RMB.second = RegMaskSlots.size() - RMB.first;; 628 } 629 630 // Create empty intervals for registers defined by implicit_def's (except 631 // for those implicit_def that define values which are liveout of their 632 // blocks. 633 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) { 634 unsigned UndefReg = UndefUses[i]; 635 (void)getOrCreateInterval(UndefReg); 636 } 637} 638 639LiveInterval* LiveIntervals::createInterval(unsigned reg) { 640 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F; 641 return new LiveInterval(reg, Weight); 642} 643 644 645//===----------------------------------------------------------------------===// 646// Register Unit Liveness 647//===----------------------------------------------------------------------===// 648// 649// Fixed interference typically comes from ABI boundaries: Function arguments 650// and return values are passed in fixed registers, and so are exception 651// pointers entering landing pads. Certain instructions require values to be 652// present in specific registers. That is also represented through fixed 653// interference. 654// 655 656/// computeRegUnitInterval - Compute the live interval of a register unit, based 657/// on the uses and defs of aliasing registers. The interval should be empty, 658/// or contain only dead phi-defs from ABI blocks. 659void LiveIntervals::computeRegUnitInterval(LiveInterval *LI) { 660 unsigned Unit = LI->reg; 661 662 assert(LRCalc && "LRCalc not initialized."); 663 LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator()); 664 665 // The physregs aliasing Unit are the roots and their super-registers. 666 // Create all values as dead defs before extending to uses. Note that roots 667 // may share super-registers. That's OK because createDeadDefs() is 668 // idempotent. It is very rare for a register unit to have multiple roots, so 669 // uniquing super-registers is probably not worthwhile. 670 for (MCRegUnitRootIterator Roots(Unit, TRI); Roots.isValid(); ++Roots) { 671 unsigned Root = *Roots; 672 if (!MRI->reg_empty(Root)) 673 LRCalc->createDeadDefs(LI, Root); 674 for (MCSuperRegIterator Supers(Root, TRI); Supers.isValid(); ++Supers) { 675 if (!MRI->reg_empty(*Supers)) 676 LRCalc->createDeadDefs(LI, *Supers); 677 } 678 } 679 680 // Now extend LI to reach all uses. 681 // Ignore uses of reserved registers. We only track defs of those. 682 for (MCRegUnitRootIterator Roots(Unit, TRI); Roots.isValid(); ++Roots) { 683 unsigned Root = *Roots; 684 if (!isReserved(Root) && !MRI->reg_empty(Root)) 685 LRCalc->extendToUses(LI, Root); 686 for (MCSuperRegIterator Supers(Root, TRI); Supers.isValid(); ++Supers) { 687 unsigned Reg = *Supers; 688 if (!isReserved(Reg) && !MRI->reg_empty(Reg)) 689 LRCalc->extendToUses(LI, Reg); 690 } 691 } 692} 693 694 695/// computeLiveInRegUnits - Precompute the live ranges of any register units 696/// that are live-in to an ABI block somewhere. Register values can appear 697/// without a corresponding def when entering the entry block or a landing pad. 698/// 699void LiveIntervals::computeLiveInRegUnits() { 700 RegUnitIntervals.resize(TRI->getNumRegUnits()); 701 DEBUG(dbgs() << "Computing live-in reg-units in ABI blocks.\n"); 702 703 // Keep track of the intervals allocated. 704 SmallVector<LiveInterval*, 8> NewIntvs; 705 706 // Check all basic blocks for live-ins. 707 for (MachineFunction::const_iterator MFI = MF->begin(), MFE = MF->end(); 708 MFI != MFE; ++MFI) { 709 const MachineBasicBlock *MBB = MFI; 710 711 // We only care about ABI blocks: Entry + landing pads. 712 if ((MFI != MF->begin() && !MBB->isLandingPad()) || MBB->livein_empty()) 713 continue; 714 715 // Create phi-defs at Begin for all live-in registers. 716 SlotIndex Begin = Indexes->getMBBStartIdx(MBB); 717 DEBUG(dbgs() << Begin << "\tBB#" << MBB->getNumber()); 718 for (MachineBasicBlock::livein_iterator LII = MBB->livein_begin(), 719 LIE = MBB->livein_end(); LII != LIE; ++LII) { 720 for (MCRegUnitIterator Units(*LII, TRI); Units.isValid(); ++Units) { 721 unsigned Unit = *Units; 722 LiveInterval *Intv = RegUnitIntervals[Unit]; 723 if (!Intv) { 724 Intv = RegUnitIntervals[Unit] = new LiveInterval(Unit, HUGE_VALF); 725 NewIntvs.push_back(Intv); 726 } 727 VNInfo *VNI = Intv->createDeadDef(Begin, getVNInfoAllocator()); 728 (void)VNI; 729 DEBUG(dbgs() << ' ' << PrintRegUnit(Unit, TRI) << '#' << VNI->id); 730 } 731 } 732 DEBUG(dbgs() << '\n'); 733 } 734 DEBUG(dbgs() << "Created " << NewIntvs.size() << " new intervals.\n"); 735 736 // Compute the 'normal' part of the intervals. 737 for (unsigned i = 0, e = NewIntvs.size(); i != e; ++i) 738 computeRegUnitInterval(NewIntvs[i]); 739} 740 741 742/// shrinkToUses - After removing some uses of a register, shrink its live 743/// range to just the remaining uses. This method does not compute reaching 744/// defs for new uses, and it doesn't remove dead defs. 745bool LiveIntervals::shrinkToUses(LiveInterval *li, 746 SmallVectorImpl<MachineInstr*> *dead) { 747 DEBUG(dbgs() << "Shrink: " << *li << '\n'); 748 assert(TargetRegisterInfo::isVirtualRegister(li->reg) 749 && "Can only shrink virtual registers"); 750 // Find all the values used, including PHI kills. 751 SmallVector<std::pair<SlotIndex, VNInfo*>, 16> WorkList; 752 753 // Blocks that have already been added to WorkList as live-out. 754 SmallPtrSet<MachineBasicBlock*, 16> LiveOut; 755 756 // Visit all instructions reading li->reg. 757 for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(li->reg); 758 MachineInstr *UseMI = I.skipInstruction();) { 759 if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg)) 760 continue; 761 SlotIndex Idx = getInstructionIndex(UseMI).getRegSlot(); 762 LiveRangeQuery LRQ(*li, Idx); 763 VNInfo *VNI = LRQ.valueIn(); 764 if (!VNI) { 765 // This shouldn't happen: readsVirtualRegister returns true, but there is 766 // no live value. It is likely caused by a target getting <undef> flags 767 // wrong. 768 DEBUG(dbgs() << Idx << '\t' << *UseMI 769 << "Warning: Instr claims to read non-existent value in " 770 << *li << '\n'); 771 continue; 772 } 773 // Special case: An early-clobber tied operand reads and writes the 774 // register one slot early. 775 if (VNInfo *DefVNI = LRQ.valueDefined()) 776 Idx = DefVNI->def; 777 778 WorkList.push_back(std::make_pair(Idx, VNI)); 779 } 780 781 // Create a new live interval with only minimal live segments per def. 782 LiveInterval NewLI(li->reg, 0); 783 for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end(); 784 I != E; ++I) { 785 VNInfo *VNI = *I; 786 if (VNI->isUnused()) 787 continue; 788 NewLI.addRange(LiveRange(VNI->def, VNI->def.getDeadSlot(), VNI)); 789 } 790 791 // Keep track of the PHIs that are in use. 792 SmallPtrSet<VNInfo*, 8> UsedPHIs; 793 794 // Extend intervals to reach all uses in WorkList. 795 while (!WorkList.empty()) { 796 SlotIndex Idx = WorkList.back().first; 797 VNInfo *VNI = WorkList.back().second; 798 WorkList.pop_back(); 799 const MachineBasicBlock *MBB = getMBBFromIndex(Idx.getPrevSlot()); 800 SlotIndex BlockStart = getMBBStartIdx(MBB); 801 802 // Extend the live range for VNI to be live at Idx. 803 if (VNInfo *ExtVNI = NewLI.extendInBlock(BlockStart, Idx)) { 804 (void)ExtVNI; 805 assert(ExtVNI == VNI && "Unexpected existing value number"); 806 // Is this a PHIDef we haven't seen before? 807 if (!VNI->isPHIDef() || VNI->def != BlockStart || !UsedPHIs.insert(VNI)) 808 continue; 809 // The PHI is live, make sure the predecessors are live-out. 810 for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(), 811 PE = MBB->pred_end(); PI != PE; ++PI) { 812 if (!LiveOut.insert(*PI)) 813 continue; 814 SlotIndex Stop = getMBBEndIdx(*PI); 815 // A predecessor is not required to have a live-out value for a PHI. 816 if (VNInfo *PVNI = li->getVNInfoBefore(Stop)) 817 WorkList.push_back(std::make_pair(Stop, PVNI)); 818 } 819 continue; 820 } 821 822 // VNI is live-in to MBB. 823 DEBUG(dbgs() << " live-in at " << BlockStart << '\n'); 824 NewLI.addRange(LiveRange(BlockStart, Idx, VNI)); 825 826 // Make sure VNI is live-out from the predecessors. 827 for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(), 828 PE = MBB->pred_end(); PI != PE; ++PI) { 829 if (!LiveOut.insert(*PI)) 830 continue; 831 SlotIndex Stop = getMBBEndIdx(*PI); 832 assert(li->getVNInfoBefore(Stop) == VNI && 833 "Wrong value out of predecessor"); 834 WorkList.push_back(std::make_pair(Stop, VNI)); 835 } 836 } 837 838 // Handle dead values. 839 bool CanSeparate = false; 840 for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end(); 841 I != E; ++I) { 842 VNInfo *VNI = *I; 843 if (VNI->isUnused()) 844 continue; 845 LiveInterval::iterator LII = NewLI.FindLiveRangeContaining(VNI->def); 846 assert(LII != NewLI.end() && "Missing live range for PHI"); 847 if (LII->end != VNI->def.getDeadSlot()) 848 continue; 849 if (VNI->isPHIDef()) { 850 // This is a dead PHI. Remove it. 851 VNI->setIsUnused(true); 852 NewLI.removeRange(*LII); 853 DEBUG(dbgs() << "Dead PHI at " << VNI->def << " may separate interval\n"); 854 CanSeparate = true; 855 } else { 856 // This is a dead def. Make sure the instruction knows. 857 MachineInstr *MI = getInstructionFromIndex(VNI->def); 858 assert(MI && "No instruction defining live value"); 859 MI->addRegisterDead(li->reg, TRI); 860 if (dead && MI->allDefsAreDead()) { 861 DEBUG(dbgs() << "All defs dead: " << VNI->def << '\t' << *MI); 862 dead->push_back(MI); 863 } 864 } 865 } 866 867 // Move the trimmed ranges back. 868 li->ranges.swap(NewLI.ranges); 869 DEBUG(dbgs() << "Shrunk: " << *li << '\n'); 870 return CanSeparate; 871} 872 873 874//===----------------------------------------------------------------------===// 875// Register allocator hooks. 876// 877 878void LiveIntervals::addKillFlags() { 879 for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) { 880 unsigned Reg = TargetRegisterInfo::index2VirtReg(i); 881 if (MRI->reg_nodbg_empty(Reg)) 882 continue; 883 LiveInterval *LI = &getInterval(Reg); 884 885 // Every instruction that kills Reg corresponds to a live range end point. 886 for (LiveInterval::iterator RI = LI->begin(), RE = LI->end(); RI != RE; 887 ++RI) { 888 // A block index indicates an MBB edge. 889 if (RI->end.isBlock()) 890 continue; 891 MachineInstr *MI = getInstructionFromIndex(RI->end); 892 if (!MI) 893 continue; 894 MI->addRegisterKilled(Reg, NULL); 895 } 896 } 897} 898 899MachineBasicBlock* 900LiveIntervals::intervalIsInOneMBB(const LiveInterval &LI) const { 901 // A local live range must be fully contained inside the block, meaning it is 902 // defined and killed at instructions, not at block boundaries. It is not 903 // live in or or out of any block. 904 // 905 // It is technically possible to have a PHI-defined live range identical to a 906 // single block, but we are going to return false in that case. 907 908 SlotIndex Start = LI.beginIndex(); 909 if (Start.isBlock()) 910 return NULL; 911 912 SlotIndex Stop = LI.endIndex(); 913 if (Stop.isBlock()) 914 return NULL; 915 916 // getMBBFromIndex doesn't need to search the MBB table when both indexes 917 // belong to proper instructions. 918 MachineBasicBlock *MBB1 = Indexes->getMBBFromIndex(Start); 919 MachineBasicBlock *MBB2 = Indexes->getMBBFromIndex(Stop); 920 return MBB1 == MBB2 ? MBB1 : NULL; 921} 922 923float 924LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) { 925 // Limit the loop depth ridiculousness. 926 if (loopDepth > 200) 927 loopDepth = 200; 928 929 // The loop depth is used to roughly estimate the number of times the 930 // instruction is executed. Something like 10^d is simple, but will quickly 931 // overflow a float. This expression behaves like 10^d for small d, but is 932 // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of 933 // headroom before overflow. 934 // By the way, powf() might be unavailable here. For consistency, 935 // We may take pow(double,double). 936 float lc = std::pow(1 + (100.0 / (loopDepth + 10)), (double)loopDepth); 937 938 return (isDef + isUse) * lc; 939} 940 941LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg, 942 MachineInstr* startInst) { 943 LiveInterval& Interval = getOrCreateInterval(reg); 944 VNInfo* VN = Interval.getNextValue( 945 SlotIndex(getInstructionIndex(startInst).getRegSlot()), 946 getVNInfoAllocator()); 947 VN->setHasPHIKill(true); 948 LiveRange LR( 949 SlotIndex(getInstructionIndex(startInst).getRegSlot()), 950 getMBBEndIdx(startInst->getParent()), VN); 951 Interval.addRange(LR); 952 953 return LR; 954} 955 956 957//===----------------------------------------------------------------------===// 958// Register mask functions 959//===----------------------------------------------------------------------===// 960 961bool LiveIntervals::checkRegMaskInterference(LiveInterval &LI, 962 BitVector &UsableRegs) { 963 if (LI.empty()) 964 return false; 965 LiveInterval::iterator LiveI = LI.begin(), LiveE = LI.end(); 966 967 // Use a smaller arrays for local live ranges. 968 ArrayRef<SlotIndex> Slots; 969 ArrayRef<const uint32_t*> Bits; 970 if (MachineBasicBlock *MBB = intervalIsInOneMBB(LI)) { 971 Slots = getRegMaskSlotsInBlock(MBB->getNumber()); 972 Bits = getRegMaskBitsInBlock(MBB->getNumber()); 973 } else { 974 Slots = getRegMaskSlots(); 975 Bits = getRegMaskBits(); 976 } 977 978 // We are going to enumerate all the register mask slots contained in LI. 979 // Start with a binary search of RegMaskSlots to find a starting point. 980 ArrayRef<SlotIndex>::iterator SlotI = 981 std::lower_bound(Slots.begin(), Slots.end(), LiveI->start); 982 ArrayRef<SlotIndex>::iterator SlotE = Slots.end(); 983 984 // No slots in range, LI begins after the last call. 985 if (SlotI == SlotE) 986 return false; 987 988 bool Found = false; 989 for (;;) { 990 assert(*SlotI >= LiveI->start); 991 // Loop over all slots overlapping this segment. 992 while (*SlotI < LiveI->end) { 993 // *SlotI overlaps LI. Collect mask bits. 994 if (!Found) { 995 // This is the first overlap. Initialize UsableRegs to all ones. 996 UsableRegs.clear(); 997 UsableRegs.resize(TRI->getNumRegs(), true); 998 Found = true; 999 } 1000 // Remove usable registers clobbered by this mask. 1001 UsableRegs.clearBitsNotInMask(Bits[SlotI-Slots.begin()]); 1002 if (++SlotI == SlotE) 1003 return Found; 1004 } 1005 // *SlotI is beyond the current LI segment. 1006 LiveI = LI.advanceTo(LiveI, *SlotI); 1007 if (LiveI == LiveE) 1008 return Found; 1009 // Advance SlotI until it overlaps. 1010 while (*SlotI < LiveI->start) 1011 if (++SlotI == SlotE) 1012 return Found; 1013 } 1014} 1015 1016//===----------------------------------------------------------------------===// 1017// IntervalUpdate class. 1018//===----------------------------------------------------------------------===// 1019 1020// HMEditor is a toolkit used by handleMove to trim or extend live intervals. 1021class LiveIntervals::HMEditor { 1022private: 1023 LiveIntervals& LIS; 1024 const MachineRegisterInfo& MRI; 1025 const TargetRegisterInfo& TRI; 1026 SlotIndex NewIdx; 1027 1028 typedef std::pair<LiveInterval*, LiveRange*> IntRangePair; 1029 typedef DenseSet<IntRangePair> RangeSet; 1030 1031 struct RegRanges { 1032 LiveRange* Use; 1033 LiveRange* EC; 1034 LiveRange* Dead; 1035 LiveRange* Def; 1036 RegRanges() : Use(0), EC(0), Dead(0), Def(0) {} 1037 }; 1038 typedef DenseMap<unsigned, RegRanges> BundleRanges; 1039 1040public: 1041 HMEditor(LiveIntervals& LIS, const MachineRegisterInfo& MRI, 1042 const TargetRegisterInfo& TRI, SlotIndex NewIdx) 1043 : LIS(LIS), MRI(MRI), TRI(TRI), NewIdx(NewIdx) {} 1044 1045 // Update intervals for all operands of MI from OldIdx to NewIdx. 1046 // This assumes that MI used to be at OldIdx, and now resides at 1047 // NewIdx. 1048 void moveAllRangesFrom(MachineInstr* MI, SlotIndex OldIdx) { 1049 assert(NewIdx != OldIdx && "No-op move? That's a bit strange."); 1050 1051 // Collect the operands. 1052 RangeSet Entering, Internal, Exiting; 1053 bool hasRegMaskOp = false; 1054 collectRanges(MI, Entering, Internal, Exiting, hasRegMaskOp, OldIdx); 1055 1056 // To keep the LiveRanges valid within an interval, move the ranges closest 1057 // to the destination first. This prevents ranges from overlapping, to that 1058 // APIs like removeRange still work. 1059 if (NewIdx < OldIdx) { 1060 moveAllEnteringFrom(OldIdx, Entering); 1061 moveAllInternalFrom(OldIdx, Internal); 1062 moveAllExitingFrom(OldIdx, Exiting); 1063 } 1064 else { 1065 moveAllExitingFrom(OldIdx, Exiting); 1066 moveAllInternalFrom(OldIdx, Internal); 1067 moveAllEnteringFrom(OldIdx, Entering); 1068 } 1069 1070 if (hasRegMaskOp) 1071 updateRegMaskSlots(OldIdx); 1072 1073#ifndef NDEBUG 1074 LIValidator validator; 1075 validator = std::for_each(Entering.begin(), Entering.end(), validator); 1076 validator = std::for_each(Internal.begin(), Internal.end(), validator); 1077 validator = std::for_each(Exiting.begin(), Exiting.end(), validator); 1078 assert(validator.rangesOk() && "moveAllOperandsFrom broke liveness."); 1079#endif 1080 1081 } 1082 1083 // Update intervals for all operands of MI to refer to BundleStart's 1084 // SlotIndex. 1085 void moveAllRangesInto(MachineInstr* MI, MachineInstr* BundleStart) { 1086 if (MI == BundleStart) 1087 return; // Bundling instr with itself - nothing to do. 1088 1089 SlotIndex OldIdx = LIS.getSlotIndexes()->getInstructionIndex(MI); 1090 assert(LIS.getSlotIndexes()->getInstructionFromIndex(OldIdx) == MI && 1091 "SlotIndex <-> Instruction mapping broken for MI"); 1092 1093 // Collect all ranges already in the bundle. 1094 MachineBasicBlock::instr_iterator BII(BundleStart); 1095 RangeSet Entering, Internal, Exiting; 1096 bool hasRegMaskOp = false; 1097 collectRanges(BII, Entering, Internal, Exiting, hasRegMaskOp, NewIdx); 1098 assert(!hasRegMaskOp && "Can't have RegMask operand in bundle."); 1099 for (++BII; &*BII == MI || BII->isInsideBundle(); ++BII) { 1100 if (&*BII == MI) 1101 continue; 1102 collectRanges(BII, Entering, Internal, Exiting, hasRegMaskOp, NewIdx); 1103 assert(!hasRegMaskOp && "Can't have RegMask operand in bundle."); 1104 } 1105 1106 BundleRanges BR = createBundleRanges(Entering, Internal, Exiting); 1107 1108 Entering.clear(); 1109 Internal.clear(); 1110 Exiting.clear(); 1111 collectRanges(MI, Entering, Internal, Exiting, hasRegMaskOp, OldIdx); 1112 assert(!hasRegMaskOp && "Can't have RegMask operand in bundle."); 1113 1114 DEBUG(dbgs() << "Entering: " << Entering.size() << "\n"); 1115 DEBUG(dbgs() << "Internal: " << Internal.size() << "\n"); 1116 DEBUG(dbgs() << "Exiting: " << Exiting.size() << "\n"); 1117 1118 moveAllEnteringFromInto(OldIdx, Entering, BR); 1119 moveAllInternalFromInto(OldIdx, Internal, BR); 1120 moveAllExitingFromInto(OldIdx, Exiting, BR); 1121 1122 1123#ifndef NDEBUG 1124 LIValidator validator; 1125 validator = std::for_each(Entering.begin(), Entering.end(), validator); 1126 validator = std::for_each(Internal.begin(), Internal.end(), validator); 1127 validator = std::for_each(Exiting.begin(), Exiting.end(), validator); 1128 assert(validator.rangesOk() && "moveAllOperandsInto broke liveness."); 1129#endif 1130 } 1131 1132private: 1133 1134#ifndef NDEBUG 1135 class LIValidator { 1136 private: 1137 DenseSet<const LiveInterval*> Checked, Bogus; 1138 public: 1139 void operator()(const IntRangePair& P) { 1140 const LiveInterval* LI = P.first; 1141 if (Checked.count(LI)) 1142 return; 1143 Checked.insert(LI); 1144 if (LI->empty()) 1145 return; 1146 SlotIndex LastEnd = LI->begin()->start; 1147 for (LiveInterval::const_iterator LRI = LI->begin(), LRE = LI->end(); 1148 LRI != LRE; ++LRI) { 1149 const LiveRange& LR = *LRI; 1150 if (LastEnd > LR.start || LR.start >= LR.end) 1151 Bogus.insert(LI); 1152 LastEnd = LR.end; 1153 } 1154 } 1155 1156 bool rangesOk() const { 1157 return Bogus.empty(); 1158 } 1159 }; 1160#endif 1161 1162 // Collect IntRangePairs for all operands of MI that may need fixing. 1163 // Treat's MI's index as OldIdx (regardless of what it is in SlotIndexes' 1164 // maps). 1165 void collectRanges(MachineInstr* MI, RangeSet& Entering, RangeSet& Internal, 1166 RangeSet& Exiting, bool& hasRegMaskOp, SlotIndex OldIdx) { 1167 hasRegMaskOp = false; 1168 for (MachineInstr::mop_iterator MOI = MI->operands_begin(), 1169 MOE = MI->operands_end(); 1170 MOI != MOE; ++MOI) { 1171 const MachineOperand& MO = *MOI; 1172 1173 if (MO.isRegMask()) { 1174 hasRegMaskOp = true; 1175 continue; 1176 } 1177 1178 if (!MO.isReg() || MO.getReg() == 0) 1179 continue; 1180 1181 unsigned Reg = MO.getReg(); 1182 1183 // TODO: Currently we're skipping uses that are reserved or have no 1184 // interval, but we're not updating their kills. This should be 1185 // fixed. 1186 if (TargetRegisterInfo::isPhysicalRegister(Reg) && LIS.isReserved(Reg)) 1187 continue; 1188 1189 // Collect ranges for register units. These live ranges are computed on 1190 // demand, so just skip any that haven't been computed yet. 1191 if (TargetRegisterInfo::isPhysicalRegister(Reg) && LIS.trackingRegUnits()) 1192 for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units) 1193 if (LiveInterval *LI = LIS.getCachedRegUnit(*Units)) 1194 collectRanges(MO, LI, Entering, Internal, Exiting, OldIdx); 1195 1196 // Collect ranges for individual registers. 1197 if (LIS.hasInterval(Reg)) 1198 collectRanges(MO, &LIS.getInterval(Reg), 1199 Entering, Internal, Exiting, OldIdx); 1200 } 1201 } 1202 1203 void collectRanges(const MachineOperand &MO, LiveInterval *LI, 1204 RangeSet &Entering, RangeSet &Internal, RangeSet &Exiting, 1205 SlotIndex OldIdx) { 1206 if (MO.readsReg()) { 1207 LiveRange* LR = LI->getLiveRangeContaining(OldIdx); 1208 if (LR != 0) 1209 Entering.insert(std::make_pair(LI, LR)); 1210 } 1211 if (MO.isDef()) { 1212 LiveRange* LR = LI->getLiveRangeContaining(OldIdx.getRegSlot()); 1213 assert(LR != 0 && "No live range for def?"); 1214 if (LR->end > OldIdx.getDeadSlot()) 1215 Exiting.insert(std::make_pair(LI, LR)); 1216 else 1217 Internal.insert(std::make_pair(LI, LR)); 1218 } 1219 } 1220 1221 BundleRanges createBundleRanges(RangeSet& Entering, 1222 RangeSet& Internal, 1223 RangeSet& Exiting) { 1224 BundleRanges BR; 1225 1226 for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end(); 1227 EI != EE; ++EI) { 1228 LiveInterval* LI = EI->first; 1229 LiveRange* LR = EI->second; 1230 BR[LI->reg].Use = LR; 1231 } 1232 1233 for (RangeSet::iterator II = Internal.begin(), IE = Internal.end(); 1234 II != IE; ++II) { 1235 LiveInterval* LI = II->first; 1236 LiveRange* LR = II->second; 1237 if (LR->end.isDead()) { 1238 BR[LI->reg].Dead = LR; 1239 } else { 1240 BR[LI->reg].EC = LR; 1241 } 1242 } 1243 1244 for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end(); 1245 EI != EE; ++EI) { 1246 LiveInterval* LI = EI->first; 1247 LiveRange* LR = EI->second; 1248 BR[LI->reg].Def = LR; 1249 } 1250 1251 return BR; 1252 } 1253 1254 void moveKillFlags(unsigned reg, SlotIndex OldIdx, SlotIndex newKillIdx) { 1255 MachineInstr* OldKillMI = LIS.getInstructionFromIndex(OldIdx); 1256 if (!OldKillMI->killsRegister(reg)) 1257 return; // Bail out if we don't have kill flags on the old register. 1258 MachineInstr* NewKillMI = LIS.getInstructionFromIndex(newKillIdx); 1259 assert(OldKillMI->killsRegister(reg) && "Old 'kill' instr isn't a kill."); 1260 assert(!NewKillMI->killsRegister(reg) && 1261 "New kill instr is already a kill."); 1262 OldKillMI->clearRegisterKills(reg, &TRI); 1263 NewKillMI->addRegisterKilled(reg, &TRI); 1264 } 1265 1266 void updateRegMaskSlots(SlotIndex OldIdx) { 1267 SmallVectorImpl<SlotIndex>::iterator RI = 1268 std::lower_bound(LIS.RegMaskSlots.begin(), LIS.RegMaskSlots.end(), 1269 OldIdx); 1270 assert(*RI == OldIdx && "No RegMask at OldIdx."); 1271 *RI = NewIdx; 1272 assert(*prior(RI) < *RI && *RI < *next(RI) && 1273 "RegSlots out of order. Did you move one call across another?"); 1274 } 1275 1276 // Return the last use of reg between NewIdx and OldIdx. 1277 SlotIndex findLastUseBefore(unsigned Reg, SlotIndex OldIdx) { 1278 SlotIndex LastUse = NewIdx; 1279 for (MachineRegisterInfo::use_nodbg_iterator 1280 UI = MRI.use_nodbg_begin(Reg), 1281 UE = MRI.use_nodbg_end(); 1282 UI != UE; UI.skipInstruction()) { 1283 const MachineInstr* MI = &*UI; 1284 SlotIndex InstSlot = LIS.getSlotIndexes()->getInstructionIndex(MI); 1285 if (InstSlot > LastUse && InstSlot < OldIdx) 1286 LastUse = InstSlot; 1287 } 1288 return LastUse; 1289 } 1290 1291 void moveEnteringUpFrom(SlotIndex OldIdx, IntRangePair& P) { 1292 LiveInterval* LI = P.first; 1293 LiveRange* LR = P.second; 1294 bool LiveThrough = LR->end > OldIdx.getRegSlot(); 1295 if (LiveThrough) 1296 return; 1297 SlotIndex LastUse = findLastUseBefore(LI->reg, OldIdx); 1298 if (LastUse != NewIdx) 1299 moveKillFlags(LI->reg, NewIdx, LastUse); 1300 LR->end = LastUse.getRegSlot(); 1301 } 1302 1303 void moveEnteringDownFrom(SlotIndex OldIdx, IntRangePair& P) { 1304 LiveInterval* LI = P.first; 1305 LiveRange* LR = P.second; 1306 // Extend the LiveRange if NewIdx is past the end. 1307 if (NewIdx > LR->end) { 1308 // Move kill flags if OldIdx was not originally the end 1309 // (otherwise LR->end points to an invalid slot). 1310 if (LR->end.getRegSlot() != OldIdx.getRegSlot()) { 1311 assert(LR->end > OldIdx && "LiveRange does not cover original slot"); 1312 moveKillFlags(LI->reg, LR->end, NewIdx); 1313 } 1314 LR->end = NewIdx.getRegSlot(); 1315 } 1316 } 1317 1318 void moveAllEnteringFrom(SlotIndex OldIdx, RangeSet& Entering) { 1319 bool GoingUp = NewIdx < OldIdx; 1320 1321 if (GoingUp) { 1322 for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end(); 1323 EI != EE; ++EI) 1324 moveEnteringUpFrom(OldIdx, *EI); 1325 } else { 1326 for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end(); 1327 EI != EE; ++EI) 1328 moveEnteringDownFrom(OldIdx, *EI); 1329 } 1330 } 1331 1332 void moveInternalFrom(SlotIndex OldIdx, IntRangePair& P) { 1333 LiveInterval* LI = P.first; 1334 LiveRange* LR = P.second; 1335 assert(OldIdx < LR->start && LR->start < OldIdx.getDeadSlot() && 1336 LR->end <= OldIdx.getDeadSlot() && 1337 "Range should be internal to OldIdx."); 1338 LiveRange Tmp(*LR); 1339 Tmp.start = NewIdx.getRegSlot(LR->start.isEarlyClobber()); 1340 Tmp.valno->def = Tmp.start; 1341 Tmp.end = LR->end.isDead() ? NewIdx.getDeadSlot() : NewIdx.getRegSlot(); 1342 LI->removeRange(*LR); 1343 LI->addRange(Tmp); 1344 } 1345 1346 void moveAllInternalFrom(SlotIndex OldIdx, RangeSet& Internal) { 1347 for (RangeSet::iterator II = Internal.begin(), IE = Internal.end(); 1348 II != IE; ++II) 1349 moveInternalFrom(OldIdx, *II); 1350 } 1351 1352 void moveExitingFrom(SlotIndex OldIdx, IntRangePair& P) { 1353 LiveRange* LR = P.second; 1354 assert(OldIdx < LR->start && LR->start < OldIdx.getDeadSlot() && 1355 "Range should start in OldIdx."); 1356 assert(LR->end > OldIdx.getDeadSlot() && "Range should exit OldIdx."); 1357 SlotIndex NewStart = NewIdx.getRegSlot(LR->start.isEarlyClobber()); 1358 LR->start = NewStart; 1359 LR->valno->def = NewStart; 1360 } 1361 1362 void moveAllExitingFrom(SlotIndex OldIdx, RangeSet& Exiting) { 1363 for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end(); 1364 EI != EE; ++EI) 1365 moveExitingFrom(OldIdx, *EI); 1366 } 1367 1368 void moveEnteringUpFromInto(SlotIndex OldIdx, IntRangePair& P, 1369 BundleRanges& BR) { 1370 LiveInterval* LI = P.first; 1371 LiveRange* LR = P.second; 1372 bool LiveThrough = LR->end > OldIdx.getRegSlot(); 1373 if (LiveThrough) { 1374 assert((LR->start < NewIdx || BR[LI->reg].Def == LR) && 1375 "Def in bundle should be def range."); 1376 assert((BR[LI->reg].Use == 0 || BR[LI->reg].Use == LR) && 1377 "If bundle has use for this reg it should be LR."); 1378 BR[LI->reg].Use = LR; 1379 return; 1380 } 1381 1382 SlotIndex LastUse = findLastUseBefore(LI->reg, OldIdx); 1383 moveKillFlags(LI->reg, OldIdx, LastUse); 1384 1385 if (LR->start < NewIdx) { 1386 // Becoming a new entering range. 1387 assert(BR[LI->reg].Dead == 0 && BR[LI->reg].Def == 0 && 1388 "Bundle shouldn't be re-defining reg mid-range."); 1389 assert((BR[LI->reg].Use == 0 || BR[LI->reg].Use == LR) && 1390 "Bundle shouldn't have different use range for same reg."); 1391 LR->end = LastUse.getRegSlot(); 1392 BR[LI->reg].Use = LR; 1393 } else { 1394 // Becoming a new Dead-def. 1395 assert(LR->start == NewIdx.getRegSlot(LR->start.isEarlyClobber()) && 1396 "Live range starting at unexpected slot."); 1397 assert(BR[LI->reg].Def == LR && "Reg should have def range."); 1398 assert(BR[LI->reg].Dead == 0 && 1399 "Can't have def and dead def of same reg in a bundle."); 1400 LR->end = LastUse.getDeadSlot(); 1401 BR[LI->reg].Dead = BR[LI->reg].Def; 1402 BR[LI->reg].Def = 0; 1403 } 1404 } 1405 1406 void moveEnteringDownFromInto(SlotIndex OldIdx, IntRangePair& P, 1407 BundleRanges& BR) { 1408 LiveInterval* LI = P.first; 1409 LiveRange* LR = P.second; 1410 if (NewIdx > LR->end) { 1411 // Range extended to bundle. Add to bundle uses. 1412 // Note: Currently adds kill flags to bundle start. 1413 assert(BR[LI->reg].Use == 0 && 1414 "Bundle already has use range for reg."); 1415 moveKillFlags(LI->reg, LR->end, NewIdx); 1416 LR->end = NewIdx.getRegSlot(); 1417 BR[LI->reg].Use = LR; 1418 } else { 1419 assert(BR[LI->reg].Use != 0 && 1420 "Bundle should already have a use range for reg."); 1421 } 1422 } 1423 1424 void moveAllEnteringFromInto(SlotIndex OldIdx, RangeSet& Entering, 1425 BundleRanges& BR) { 1426 bool GoingUp = NewIdx < OldIdx; 1427 1428 if (GoingUp) { 1429 for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end(); 1430 EI != EE; ++EI) 1431 moveEnteringUpFromInto(OldIdx, *EI, BR); 1432 } else { 1433 for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end(); 1434 EI != EE; ++EI) 1435 moveEnteringDownFromInto(OldIdx, *EI, BR); 1436 } 1437 } 1438 1439 void moveInternalFromInto(SlotIndex OldIdx, IntRangePair& P, 1440 BundleRanges& BR) { 1441 // TODO: Sane rules for moving ranges into bundles. 1442 } 1443 1444 void moveAllInternalFromInto(SlotIndex OldIdx, RangeSet& Internal, 1445 BundleRanges& BR) { 1446 for (RangeSet::iterator II = Internal.begin(), IE = Internal.end(); 1447 II != IE; ++II) 1448 moveInternalFromInto(OldIdx, *II, BR); 1449 } 1450 1451 void moveExitingFromInto(SlotIndex OldIdx, IntRangePair& P, 1452 BundleRanges& BR) { 1453 LiveInterval* LI = P.first; 1454 LiveRange* LR = P.second; 1455 1456 assert(LR->start.isRegister() && 1457 "Don't know how to merge exiting ECs into bundles yet."); 1458 1459 if (LR->end > NewIdx.getDeadSlot()) { 1460 // This range is becoming an exiting range on the bundle. 1461 // If there was an old dead-def of this reg, delete it. 1462 if (BR[LI->reg].Dead != 0) { 1463 LI->removeRange(*BR[LI->reg].Dead); 1464 BR[LI->reg].Dead = 0; 1465 } 1466 assert(BR[LI->reg].Def == 0 && 1467 "Can't have two defs for the same variable exiting a bundle."); 1468 LR->start = NewIdx.getRegSlot(); 1469 LR->valno->def = LR->start; 1470 BR[LI->reg].Def = LR; 1471 } else { 1472 // This range is becoming internal to the bundle. 1473 assert(LR->end == NewIdx.getRegSlot() && 1474 "Can't bundle def whose kill is before the bundle"); 1475 if (BR[LI->reg].Dead || BR[LI->reg].Def) { 1476 // Already have a def for this. Just delete range. 1477 LI->removeRange(*LR); 1478 } else { 1479 // Make range dead, record. 1480 LR->end = NewIdx.getDeadSlot(); 1481 BR[LI->reg].Dead = LR; 1482 assert(BR[LI->reg].Use == LR && 1483 "Range becoming dead should currently be use."); 1484 } 1485 // In both cases the range is no longer a use on the bundle. 1486 BR[LI->reg].Use = 0; 1487 } 1488 } 1489 1490 void moveAllExitingFromInto(SlotIndex OldIdx, RangeSet& Exiting, 1491 BundleRanges& BR) { 1492 for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end(); 1493 EI != EE; ++EI) 1494 moveExitingFromInto(OldIdx, *EI, BR); 1495 } 1496 1497}; 1498 1499void LiveIntervals::handleMove(MachineInstr* MI) { 1500 SlotIndex OldIndex = Indexes->getInstructionIndex(MI); 1501 Indexes->removeMachineInstrFromMaps(MI); 1502 SlotIndex NewIndex = MI->isInsideBundle() ? 1503 Indexes->getInstructionIndex(MI) : 1504 Indexes->insertMachineInstrInMaps(MI); 1505 assert(getMBBStartIdx(MI->getParent()) <= OldIndex && 1506 OldIndex < getMBBEndIdx(MI->getParent()) && 1507 "Cannot handle moves across basic block boundaries."); 1508 assert(!MI->isBundled() && "Can't handle bundled instructions yet."); 1509 1510 HMEditor HME(*this, *MRI, *TRI, NewIndex); 1511 HME.moveAllRangesFrom(MI, OldIndex); 1512} 1513 1514void LiveIntervals::handleMoveIntoBundle(MachineInstr* MI, 1515 MachineInstr* BundleStart) { 1516 SlotIndex NewIndex = Indexes->getInstructionIndex(BundleStart); 1517 HMEditor HME(*this, *MRI, *TRI, NewIndex); 1518 HME.moveAllRangesInto(MI, BundleStart); 1519} 1520