//===-- PPCCTRLoops.cpp - Identify and generate CTR loops -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass identifies loops where we can generate the PPC branch instructions
// that decrement and test the count register (CTR) (bdnz and friends).
//
// The pattern that defines the induction variable can change depending on
// prior optimizations. For example, the IndVarSimplify phase run by 'opt'
// normalizes induction variables, and the Loop Strength Reduction pass
// run by 'llc' may also make changes to the induction variable.
//
// Criteria for CTR loops:
//  - Countable loops (w/ ind. var for a trip count)
//  - Try inner-most loops first
//  - No nested CTR loops.
//  - No function calls in loops.
//
//===----------------------------------------------------------------------===//
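
// For illustration only (an invented example, not taken from a test case):
// given a countable loop whose exit branch tests an induction variable,
//
//   loop:
//     ...
//     %iv.next = add i64 %iv, 1
//     %done = icmp eq i64 %iv.next, %n
//     br i1 %done, label %exit, label %loop
//
// this pass makes the preheader move the trip count into the count register
// and retargets the exit branch to test the decremented counter, roughly:
//
//   preheader:
//     call void @llvm.ppc.mtctr.i64(i64 %tripcount)
//     br label %loop
//   loop:
//     ...
//     %live = call i1 @llvm.ppc.is.decremented.ctr.nonzero()
//     br i1 %live, label %loop, label %exit
//
// Instruction selection then turns this pattern into mtctr plus bdnz.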

#include "llvm/Transforms/Scalar.h"
#include "PPC.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/PassSupport.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"

#ifndef NDEBUG
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#endif

#include <algorithm>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "ctrloops"

#ifndef NDEBUG
static cl::opt<int> CTRLoopLimit("ppc-max-ctrloop", cl::Hidden, cl::init(-1));
#endif

STATISTIC(NumCTRLoops, "Number of loops converted to CTR loops");

namespace llvm {
  void initializePPCCTRLoopsPass(PassRegistry&);
#ifndef NDEBUG
  void initializePPCCTRLoopsVerifyPass(PassRegistry&);
#endif
}

namespace {
  struct PPCCTRLoops : public FunctionPass {

#ifndef NDEBUG
    static int Counter;
#endif

  public:
    static char ID;

    PPCCTRLoops() : FunctionPass(ID), TM(nullptr) {
      initializePPCCTRLoopsPass(*PassRegistry::getPassRegistry());
    }
    PPCCTRLoops(PPCTargetMachine &TM) : FunctionPass(ID), TM(&TM) {
      initializePPCCTRLoopsPass(*PassRegistry::getPassRegistry());
    }

    bool runOnFunction(Function &F) override;

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addRequired<LoopInfoWrapperPass>();
      AU.addPreserved<LoopInfoWrapperPass>();
      AU.addRequired<DominatorTreeWrapperPass>();
      AU.addPreserved<DominatorTreeWrapperPass>();
      AU.addRequired<ScalarEvolutionWrapperPass>();
    }

  private:
    bool mightUseCTR(const Triple &TT, BasicBlock *BB);
    bool convertToCTRLoop(Loop *L);

  private:
    PPCTargetMachine *TM;
    LoopInfo *LI;
    ScalarEvolution *SE;
    const DataLayout *DL;
    DominatorTree *DT;
    const TargetLibraryInfo *LibInfo;
    bool PreserveLCSSA;
  };

  char PPCCTRLoops::ID = 0;
#ifndef NDEBUG
  int PPCCTRLoops::Counter = 0;
#endif

#ifndef NDEBUG
  struct PPCCTRLoopsVerify : public MachineFunctionPass {
  public:
    static char ID;

    PPCCTRLoopsVerify() : MachineFunctionPass(ID) {
      initializePPCCTRLoopsVerifyPass(*PassRegistry::getPassRegistry());
    }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addRequired<MachineDominatorTree>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    bool runOnMachineFunction(MachineFunction &MF) override;

  private:
    MachineDominatorTree *MDT;
  };

  char PPCCTRLoopsVerify::ID = 0;
#endif // NDEBUG
} // end anonymous namespace

INITIALIZE_PASS_BEGIN(PPCCTRLoops, "ppc-ctr-loops", "PowerPC CTR Loops",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_END(PPCCTRLoops, "ppc-ctr-loops", "PowerPC CTR Loops",
                    false, false)

FunctionPass *llvm::createPPCCTRLoops(PPCTargetMachine &TM) {
  return new PPCCTRLoops(TM);
}

#ifndef NDEBUG
INITIALIZE_PASS_BEGIN(PPCCTRLoopsVerify, "ppc-ctr-loops-verify",
                      "PowerPC CTR Loops Verify", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(PPCCTRLoopsVerify, "ppc-ctr-loops-verify",
                    "PowerPC CTR Loops Verify", false, false)

FunctionPass *llvm::createPPCCTRLoopsVerify() {
  return new PPCCTRLoopsVerify();
}
#endif // NDEBUG

bool PPCCTRLoops::runOnFunction(Function &F) {
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  DL = &F.getParent()->getDataLayout();
  auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
  LibInfo = TLIP ? &TLIP->getTLI() : nullptr;
  PreserveLCSSA = mustPreserveAnalysisID(LCSSAID);

  bool MadeChange = false;

  for (LoopInfo::iterator I = LI->begin(), E = LI->end();
       I != E; ++I) {
    Loop *L = *I;
    if (!L->getParentLoop())
      MadeChange |= convertToCTRLoop(L);
  }

  return MadeChange;
}
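
// Note: a "large" integer here is one wider than the native register (wider
// than 32 bits on PPC32, wider than 64 bits on PPC64); operations on such
// types may be legalized into runtime library calls.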
static bool isLargeIntegerTy(bool Is32Bit, Type *Ty) {
  if (IntegerType *ITy = dyn_cast<IntegerType>(Ty))
    return ITy->getBitWidth() > (Is32Bit ? 32U : 64U);

  return false;
}

// Determining the address of a TLS variable results in a function call in
// certain TLS models.
static bool memAddrUsesCTR(const PPCTargetMachine *TM,
                           const Value *MemAddr) {
  const auto *GV = dyn_cast<GlobalValue>(MemAddr);
  if (!GV) {
    // Recurse to check for constants that refer to TLS global variables.
    if (const auto *CV = dyn_cast<Constant>(MemAddr))
      for (const auto &CO : CV->operands())
        if (memAddrUsesCTR(TM, CO))
          return true;

    return false;
  }

  if (!GV->isThreadLocal())
    return false;
  if (!TM)
    return true;
  TLSModel::Model Model = TM->getTLSModel(GV);
  return Model == TLSModel::GeneralDynamic || Model == TLSModel::LocalDynamic;
}
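
// Conservatively test whether any instruction in BB might use the counter
// register, either directly or by being lowered to something that does:
// calls (including inline asm that clobbers {ctr} and intrinsics that become
// library calls), ppc_fp128 arithmetic and conversions, wide integer
// division/remainder and (on PPC32) 128-bit shifts, indirect branches and
// invokes, switches large enough to become jump tables, and memory operands
// whose TLS addressing requires a call.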
bool PPCCTRLoops::mightUseCTR(const Triple &TT, BasicBlock *BB) {
  for (BasicBlock::iterator J = BB->begin(), JE = BB->end();
       J != JE; ++J) {
    if (CallInst *CI = dyn_cast<CallInst>(J)) {
      if (InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue())) {
        // Inline ASM is okay, unless it clobbers the ctr register.
        InlineAsm::ConstraintInfoVector CIV = IA->ParseConstraints();
        for (unsigned i = 0, ie = CIV.size(); i < ie; ++i) {
          InlineAsm::ConstraintInfo &C = CIV[i];
          if (C.Type != InlineAsm::isInput)
            for (unsigned j = 0, je = C.Codes.size(); j < je; ++j)
              if (StringRef(C.Codes[j]).equals_lower("{ctr}"))
                return true;
        }

        continue;
      }

      if (!TM)
        return true;
      const TargetLowering *TLI =
          TM->getSubtargetImpl(*BB->getParent())->getTargetLowering();

      if (Function *F = CI->getCalledFunction()) {
        // Most intrinsics don't become function calls, but some might.
        // sin, cos, exp and log are always calls.
        unsigned Opcode;
        if (F->getIntrinsicID() != Intrinsic::not_intrinsic) {
          switch (F->getIntrinsicID()) {
          default: continue;
          // If we have a call to ppc_is_decremented_ctr_nonzero or ppc_mtctr,
          // we're definitely using CTR.
          case Intrinsic::ppc_is_decremented_ctr_nonzero:
          case Intrinsic::ppc_mtctr:
            return true;

// VisualStudio defines setjmp as _setjmp
#if defined(_MSC_VER) && defined(setjmp) && \
    !defined(setjmp_undefined_for_msvc)
# pragma push_macro("setjmp")
# undef setjmp
# define setjmp_undefined_for_msvc
#endif

          case Intrinsic::setjmp:

#if defined(_MSC_VER) && defined(setjmp_undefined_for_msvc)
          // let's return it to _setjmp state
# pragma pop_macro("setjmp")
# undef setjmp_undefined_for_msvc
#endif

          case Intrinsic::longjmp:

          // Exclude eh_sjlj_setjmp; we don't need to exclude eh_sjlj_longjmp
          // because, although it does clobber the counter register, the
          // control can't then return to inside the loop unless there is also
          // an eh_sjlj_setjmp.
          case Intrinsic::eh_sjlj_setjmp:

          case Intrinsic::memcpy:
          case Intrinsic::memmove:
          case Intrinsic::memset:
          case Intrinsic::powi:
          case Intrinsic::log:
          case Intrinsic::log2:
          case Intrinsic::log10:
          case Intrinsic::exp:
          case Intrinsic::exp2:
          case Intrinsic::pow:
          case Intrinsic::sin:
          case Intrinsic::cos:
            return true;
          case Intrinsic::copysign:
            if (CI->getArgOperand(0)->getType()->getScalarType()->
                isPPC_FP128Ty())
              return true;
            else
              continue; // ISD::FCOPYSIGN is never a library call.
          case Intrinsic::sqrt:      Opcode = ISD::FSQRT;      break;
          case Intrinsic::floor:     Opcode = ISD::FFLOOR;     break;
          case Intrinsic::ceil:      Opcode = ISD::FCEIL;      break;
          case Intrinsic::trunc:     Opcode = ISD::FTRUNC;     break;
          case Intrinsic::rint:      Opcode = ISD::FRINT;      break;
          case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
          case Intrinsic::round:     Opcode = ISD::FROUND;     break;
          }
        }

        // PowerPC does not use [US]DIVREM or other library calls for
        // operations on regular types which are not otherwise library calls
        // (i.e. soft float or atomics). If adapting for targets that do,
        // additional care is required here.

        LibFunc::Func Func;
        if (!F->hasLocalLinkage() && F->hasName() && LibInfo &&
            LibInfo->getLibFunc(F->getName(), Func) &&
            LibInfo->hasOptimizedCodeGen(Func)) {
          // Non-read-only functions are never treated as intrinsics.
          if (!CI->onlyReadsMemory())
            return true;

          // Conversion happens only for FP calls.
          if (!CI->getArgOperand(0)->getType()->isFloatingPointTy())
            return true;

          switch (Func) {
          default: return true;
          case LibFunc::copysign:
          case LibFunc::copysignf:
            continue; // ISD::FCOPYSIGN is never a library call.
          case LibFunc::copysignl:
            return true;
          case LibFunc::fabs:
          case LibFunc::fabsf:
          case LibFunc::fabsl:
            continue; // ISD::FABS is never a library call.
          case LibFunc::sqrt:
          case LibFunc::sqrtf:
          case LibFunc::sqrtl:
            Opcode = ISD::FSQRT; break;
          case LibFunc::floor:
          case LibFunc::floorf:
          case LibFunc::floorl:
            Opcode = ISD::FFLOOR; break;
          case LibFunc::nearbyint:
          case LibFunc::nearbyintf:
          case LibFunc::nearbyintl:
            Opcode = ISD::FNEARBYINT; break;
          case LibFunc::ceil:
          case LibFunc::ceilf:
          case LibFunc::ceill:
            Opcode = ISD::FCEIL; break;
          case LibFunc::rint:
          case LibFunc::rintf:
          case LibFunc::rintl:
            Opcode = ISD::FRINT; break;
          case LibFunc::round:
          case LibFunc::roundf:
          case LibFunc::roundl:
            Opcode = ISD::FROUND; break;
          case LibFunc::trunc:
          case LibFunc::truncf:
          case LibFunc::truncl:
            Opcode = ISD::FTRUNC; break;
          }

          auto &DL = CI->getModule()->getDataLayout();
          MVT VTy = TLI->getSimpleValueType(DL, CI->getArgOperand(0)->getType(),
                                            true);
          if (VTy == MVT::Other)
            return true;

          if (TLI->isOperationLegalOrCustom(Opcode, VTy))
            continue;
          else if (VTy.isVector() &&
                   TLI->isOperationLegalOrCustom(Opcode, VTy.getScalarType()))
            continue;

          return true;
        }
      }

      return true;
    } else if (isa<BinaryOperator>(J) &&
               J->getType()->getScalarType()->isPPC_FP128Ty()) {
      // Most operations on ppc_f128 values become calls.
      return true;
    } else if (isa<UIToFPInst>(J) || isa<SIToFPInst>(J) ||
               isa<FPToUIInst>(J) || isa<FPToSIInst>(J)) {
      CastInst *CI = cast<CastInst>(J);
      if (CI->getSrcTy()->getScalarType()->isPPC_FP128Ty() ||
          CI->getDestTy()->getScalarType()->isPPC_FP128Ty() ||
          isLargeIntegerTy(TT.isArch32Bit(), CI->getSrcTy()->getScalarType()) ||
          isLargeIntegerTy(TT.isArch32Bit(), CI->getDestTy()->getScalarType()))
        return true;
    } else if (isLargeIntegerTy(TT.isArch32Bit(),
                                J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::UDiv ||
                J->getOpcode() == Instruction::SDiv ||
                J->getOpcode() == Instruction::URem ||
                J->getOpcode() == Instruction::SRem)) {
      return true;
    } else if (TT.isArch32Bit() &&
               isLargeIntegerTy(false, J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::Shl ||
                J->getOpcode() == Instruction::AShr ||
                J->getOpcode() == Instruction::LShr)) {
      // Only on PPC32, for 128-bit integers (specifically not 64-bit
      // integers), these might be runtime calls.
      return true;
    } else if (isa<IndirectBrInst>(J) || isa<InvokeInst>(J)) {
      // On PowerPC, indirect jumps use the counter register.
      return true;
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(J)) {
      if (!TM)
        return true;
      const TargetLowering *TLI =
          TM->getSubtargetImpl(*BB->getParent())->getTargetLowering();

      if (SI->getNumCases() + 1 >= (unsigned)TLI->getMinimumJumpTableEntries())
        return true;
    }
    for (Value *Operand : J->operands())
      if (memAddrUsesCTR(TM, Operand))
        return true;
  }

  return false;
}
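
// Try to convert L into a CTR loop. A sketch of the steps performed below:
//  1. Convert subloops first; if any subloop was converted, give up on this
//     loop (no nested CTR loops).
//  2. Reject the loop if any of its blocks might already use the CTR (see
//     mightUseCTR above).
//  3. Pick a counted exit: an exiting block with a computable, loop-invariant,
//     non-constant-zero exit count that runs on every iteration and ends in a
//     conditional branch.
//  4. Expand (exit count + 1) into the preheader, feed it to ppc_mtctr, and
//     replace the branch condition with ppc_is_decremented_ctr_nonzero.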
bool PPCCTRLoops::convertToCTRLoop(Loop *L) {
  bool MadeChange = false;

  const Triple TT =
      Triple(L->getHeader()->getParent()->getParent()->getTargetTriple());
  if (!TT.isArch32Bit() && !TT.isArch64Bit())
    return MadeChange; // Unknown arch. type.

  // Process nested loops first.
  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I) {
    MadeChange |= convertToCTRLoop(*I);
    DEBUG(dbgs() << "Nested loop converted\n");
  }

  // If a nested loop has been converted, then we can't convert this loop.
  if (MadeChange)
    return MadeChange;

#ifndef NDEBUG
  // Stop trying after reaching the limit (if any).
  int Limit = CTRLoopLimit;
  if (Limit >= 0) {
    if (Counter >= CTRLoopLimit)
      return false;
    Counter++;
  }
#endif

  // We don't want to spill/restore the counter register, and so we don't
  // want to use the counter register if the loop contains calls.
  for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
       I != IE; ++I)
    if (mightUseCTR(TT, *I))
      return MadeChange;

  SmallVector<BasicBlock*, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);
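
  // Search the exiting blocks for a counted exit; note that the exit count
  // must fit in the native register width (32 or 64 bits), since it will
  // eventually be placed in the CTR.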
  BasicBlock *CountedExitBlock = nullptr;
  const SCEV *ExitCount = nullptr;
  BranchInst *CountedExitBranch = nullptr;
  for (SmallVectorImpl<BasicBlock *>::iterator I = ExitingBlocks.begin(),
       IE = ExitingBlocks.end(); I != IE; ++I) {
    const SCEV *EC = SE->getExitCount(L, *I);
    DEBUG(dbgs() << "Exit Count for " << *L << " from block " <<
                    (*I)->getName() << ": " << *EC << "\n");
    if (isa<SCEVCouldNotCompute>(EC))
      continue;
    if (const SCEVConstant *ConstEC = dyn_cast<SCEVConstant>(EC)) {
      if (ConstEC->getValue()->isZero())
        continue;
    } else if (!SE->isLoopInvariant(EC, L))
      continue;

    if (SE->getTypeSizeInBits(EC->getType()) > (TT.isArch64Bit() ? 64 : 32))
      continue;

    // We now have a loop-invariant count of loop iterations (which is not the
    // constant zero) for which we know that this loop will not exit via this
    // exiting block.

    // We need to make sure that this block will run on every loop iteration.
    // For this to be true, we must dominate all blocks with backedges. Such
    // blocks are in-loop predecessors to the header block.
    bool NotAlways = false;
    for (pred_iterator PI = pred_begin(L->getHeader()),
         PIE = pred_end(L->getHeader()); PI != PIE; ++PI) {
      if (!L->contains(*PI))
        continue;

      if (!DT->dominates(*I, *PI)) {
        NotAlways = true;
        break;
      }
    }

    if (NotAlways)
      continue;

    // Make sure this block ends with a conditional branch.
    Instruction *TI = (*I)->getTerminator();
    if (!TI)
      continue;

    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (!BI->isConditional())
        continue;

      CountedExitBranch = BI;
    } else
      continue;

    // Note that this block may not be the loop latch block, even if the loop
    // has a latch block.
    CountedExitBlock = *I;
    ExitCount = EC;
    break;
  }

  if (!CountedExitBlock)
    return MadeChange;

  BasicBlock *Preheader = L->getLoopPreheader();

  // If we don't have a preheader, then insert one. If we already have a
  // preheader, then we can use it (except if the preheader contains a use of
  // the CTR register because some such uses might be reordered by the
  // selection DAG after the mtctr instruction).
  if (!Preheader || mightUseCTR(TT, Preheader))
    Preheader = InsertPreheaderForLoop(L, DT, LI, PreserveLCSSA);
  if (!Preheader)
    return MadeChange;

  DEBUG(dbgs() << "Preheader for exit count: " << Preheader->getName() << "\n");

  // Insert the count into the preheader and replace the condition used by the
  // selected branch.
  MadeChange = true;

  SCEVExpander SCEVE(*SE, Preheader->getModule()->getDataLayout(), "loopcnt");
  LLVMContext &C = SE->getContext();
  Type *CountType = TT.isArch64Bit() ? Type::getInt64Ty(C) :
                                       Type::getInt32Ty(C);
  if (!ExitCount->getType()->isPointerTy() &&
      ExitCount->getType() != CountType)
    ExitCount = SE->getZeroExtendExpr(ExitCount, CountType);
  ExitCount = SE->getAddExpr(ExitCount, SE->getOne(CountType));
  Value *ECValue =
      SCEVE.expandCodeFor(ExitCount, CountType, Preheader->getTerminator());

  IRBuilder<> CountBuilder(Preheader->getTerminator());
  Module *M = Preheader->getParent()->getParent();
  Value *MTCTRFunc = Intrinsic::getDeclaration(M, Intrinsic::ppc_mtctr,
                                               CountType);
  CountBuilder.CreateCall(MTCTRFunc, ECValue);

  IRBuilder<> CondBuilder(CountedExitBranch);
  Value *DecFunc =
      Intrinsic::getDeclaration(M, Intrinsic::ppc_is_decremented_ctr_nonzero);
  Value *NewCond = CondBuilder.CreateCall(DecFunc, {});
  Value *OldCond = CountedExitBranch->getCondition();
  CountedExitBranch->setCondition(NewCond);

  // The false branch must exit the loop.
  if (!L->contains(CountedExitBranch->getSuccessor(0)))
    CountedExitBranch->swapSuccessors();

  // The old condition may be dead now, and may have even created a dead PHI
  // (the original induction variable).
  RecursivelyDeleteTriviallyDeadInstructions(OldCond);
  DeleteDeadPHIs(CountedExitBlock);

  ++NumCTRLoops;
  return MadeChange;
}
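
// The debug-only verifier below checks that every decrementing branch
// (bdnz/bdz) is reached only along paths on which the CTR was last set by an
// MTCTRloop/MTCTR8loop, with no intervening CTR clobber: verifyCTRBranch
// walks backward from the branch through the CFG, queueing predecessors
// until it finds the mtctr or an offending instruction.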
#ifndef NDEBUG
static bool clobbersCTR(const MachineInstr *MI) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg()) {
      if (MO.isDef() && (MO.getReg() == PPC::CTR || MO.getReg() == PPC::CTR8))
        return true;
    } else if (MO.isRegMask()) {
      if (MO.clobbersPhysReg(PPC::CTR) || MO.clobbersPhysReg(PPC::CTR8))
        return true;
    }
  }

  return false;
}

static bool verifyCTRBranch(MachineBasicBlock *MBB,
                            MachineBasicBlock::iterator I) {
  MachineBasicBlock::iterator BI = I;
  SmallSet<MachineBasicBlock *, 16> Visited;
  SmallVector<MachineBasicBlock *, 8> Preds;
  bool CheckPreds;

  if (I == MBB->begin()) {
    Visited.insert(MBB);
    goto queue_preds;
  } else
    --I;

check_block:
  Visited.insert(MBB);
  if (I == MBB->end())
    goto queue_preds;

  CheckPreds = true;
  for (MachineBasicBlock::iterator IE = MBB->begin();; --I) {
    unsigned Opc = I->getOpcode();
    if (Opc == PPC::MTCTRloop || Opc == PPC::MTCTR8loop) {
      CheckPreds = false;
      break;
    }

    if (I != BI && clobbersCTR(I)) {
      DEBUG(dbgs() << "BB#" << MBB->getNumber() << " (" <<
                      MBB->getFullName() << ") instruction " << *I <<
                      " clobbers CTR, invalidating " << "BB#" <<
                      BI->getParent()->getNumber() << " (" <<
                      BI->getParent()->getFullName() << ") instruction " <<
                      *BI << "\n");
      return false;
    }

    if (I == IE)
      break;
  }

  if (!CheckPreds && Preds.empty())
    return true;

  if (CheckPreds) {
queue_preds:
    if (MachineFunction::iterator(MBB) == MBB->getParent()->begin()) {
      DEBUG(dbgs() << "Unable to find a MTCTR instruction for BB#" <<
                      BI->getParent()->getNumber() << " (" <<
                      BI->getParent()->getFullName() << ") instruction " <<
                      *BI << "\n");
      return false;
    }

    for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
         PIE = MBB->pred_end(); PI != PIE; ++PI)
      Preds.push_back(*PI);
  }

  do {
    MBB = Preds.pop_back_val();
    if (!Visited.count(MBB)) {
      I = MBB->getLastNonDebugInstr();
      goto check_block;
    }
  } while (!Preds.empty());

  return true;
}

bool PPCCTRLoopsVerify::runOnMachineFunction(MachineFunction &MF) {
  MDT = &getAnalysis<MachineDominatorTree>();

  // Verify that all bdnz/bdz instructions are dominated by a loop mtctr before
  // any other instructions that might clobber the ctr register.
  for (MachineFunction::iterator I = MF.begin(), IE = MF.end();
       I != IE; ++I) {
    MachineBasicBlock *MBB = &*I;
    if (!MDT->isReachableFromEntry(MBB))
      continue;

    for (MachineBasicBlock::iterator MII = MBB->getFirstTerminator(),
         MIIE = MBB->end(); MII != MIIE; ++MII) {
      unsigned Opc = MII->getOpcode();
      if (Opc == PPC::BDNZ8 || Opc == PPC::BDNZ ||
          Opc == PPC::BDZ8  || Opc == PPC::BDZ)
        if (!verifyCTRBranch(MBB, MII))
          llvm_unreachable("Invalid PPC CTR loop!");
    }
  }

  return false;
}
#endif // NDEBUG