//===- InstCombineInternal.h - InstCombine pass internals -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file provides internal interfaces used to implement the InstCombine.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H
#define LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H

#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Pass.h"
#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"

#define DEBUG_TYPE "instcombine"

namespace llvm {
// Forward declarations; full definitions are only needed in the
// implementation files.
class CallSite;
class DataLayout;
class DominatorTree;
class TargetLibraryInfo;
class DbgDeclareInst;
class MemIntrinsic;
class MemSetInst;

/// \brief Specific patterns of select instructions we can match.
enum SelectPatternFlavor {
  SPF_UNKNOWN = 0,
  SPF_SMIN, // Signed minimum.
  SPF_UMIN, // Unsigned minimum.
  SPF_SMAX, // Signed maximum.
  SPF_UMAX, // Unsigned maximum.
  SPF_ABS,  // Absolute value.
  SPF_NABS  // Negated absolute value.
};

/// \brief Assign a complexity or rank value to LLVM Values.
54/// 55/// This routine maps IR values to various complexity ranks: 56/// 0 -> undef 57/// 1 -> Constants 58/// 2 -> Other non-instructions 59/// 3 -> Arguments 60/// 3 -> Unary operations 61/// 4 -> Other instructions 62static inline unsigned getComplexity(Value *V) { 63 if (isa<Instruction>(V)) { 64 if (BinaryOperator::isNeg(V) || BinaryOperator::isFNeg(V) || 65 BinaryOperator::isNot(V)) 66 return 3; 67 return 4; 68 } 69 if (isa<Argument>(V)) 70 return 3; 71 return isa<Constant>(V) ? (isa<UndefValue>(V) ? 0 : 1) : 2; 72} 73 74/// \brief Add one to a Constant 75static inline Constant *AddOne(Constant *C) { 76 return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1)); 77} 78/// \brief Subtract one from a Constant 79static inline Constant *SubOne(Constant *C) { 80 return ConstantExpr::getSub(C, ConstantInt::get(C->getType(), 1)); 81} 82 83/// \brief Return true if the specified value is free to invert (apply ~ to). 84/// This happens in cases where the ~ can be eliminated. If WillInvertAllUses 85/// is true, work under the assumption that the caller intends to remove all 86/// uses of V and only keep uses of ~V. 87/// 88static inline bool IsFreeToInvert(Value *V, bool WillInvertAllUses) { 89 // ~(~(X)) -> X. 90 if (BinaryOperator::isNot(V)) 91 return true; 92 93 // Constants can be considered to be not'ed values. 94 if (isa<ConstantInt>(V)) 95 return true; 96 97 // Compares can be inverted if all of their uses are being modified to use the 98 // ~V. 99 if (isa<CmpInst>(V)) 100 return WillInvertAllUses; 101 102 // If `V` is of the form `A + Constant` then `-1 - V` can be folded into `(-1 103 // - Constant) - A` if we are willing to invert all of the uses. 
104 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(V)) 105 if (BO->getOpcode() == Instruction::Add || 106 BO->getOpcode() == Instruction::Sub) 107 if (isa<Constant>(BO->getOperand(0)) || isa<Constant>(BO->getOperand(1))) 108 return WillInvertAllUses; 109 110 return false; 111} 112 113 114/// \brief Specific patterns of overflow check idioms that we match. 115enum OverflowCheckFlavor { 116 OCF_UNSIGNED_ADD, 117 OCF_SIGNED_ADD, 118 OCF_UNSIGNED_SUB, 119 OCF_SIGNED_SUB, 120 OCF_UNSIGNED_MUL, 121 OCF_SIGNED_MUL, 122 123 OCF_INVALID 124}; 125 126/// \brief Returns the OverflowCheckFlavor corresponding to a overflow_with_op 127/// intrinsic. 128static inline OverflowCheckFlavor 129IntrinsicIDToOverflowCheckFlavor(unsigned ID) { 130 switch (ID) { 131 default: 132 return OCF_INVALID; 133 case Intrinsic::uadd_with_overflow: 134 return OCF_UNSIGNED_ADD; 135 case Intrinsic::sadd_with_overflow: 136 return OCF_SIGNED_ADD; 137 case Intrinsic::usub_with_overflow: 138 return OCF_UNSIGNED_SUB; 139 case Intrinsic::ssub_with_overflow: 140 return OCF_SIGNED_SUB; 141 case Intrinsic::umul_with_overflow: 142 return OCF_UNSIGNED_MUL; 143 case Intrinsic::smul_with_overflow: 144 return OCF_SIGNED_MUL; 145 } 146} 147 148/// \brief An IRBuilder inserter that adds new instructions to the instcombine 149/// worklist. 
150class LLVM_LIBRARY_VISIBILITY InstCombineIRInserter 151 : public IRBuilderDefaultInserter<true> { 152 InstCombineWorklist &Worklist; 153 AssumptionCache *AC; 154 155public: 156 InstCombineIRInserter(InstCombineWorklist &WL, AssumptionCache *AC) 157 : Worklist(WL), AC(AC) {} 158 159 void InsertHelper(Instruction *I, const Twine &Name, BasicBlock *BB, 160 BasicBlock::iterator InsertPt) const { 161 IRBuilderDefaultInserter<true>::InsertHelper(I, Name, BB, InsertPt); 162 Worklist.Add(I); 163 164 using namespace llvm::PatternMatch; 165 if (match(I, m_Intrinsic<Intrinsic::assume>())) 166 AC->registerAssumption(cast<CallInst>(I)); 167 } 168}; 169 170/// \brief The core instruction combiner logic. 171/// 172/// This class provides both the logic to recursively visit instructions and 173/// combine them, as well as the pass infrastructure for running this as part 174/// of the LLVM pass pipeline. 175class LLVM_LIBRARY_VISIBILITY InstCombiner 176 : public InstVisitor<InstCombiner, Instruction *> { 177 // FIXME: These members shouldn't be public. 178public: 179 /// \brief A worklist of the instructions that need to be simplified. 180 InstCombineWorklist &Worklist; 181 182 /// \brief An IRBuilder that automatically inserts new instructions into the 183 /// worklist. 184 typedef IRBuilder<true, TargetFolder, InstCombineIRInserter> BuilderTy; 185 BuilderTy *Builder; 186 187private: 188 // Mode in which we are running the combiner. 189 const bool MinimizeSize; 190 191 // Required analyses. 192 // FIXME: These can never be null and should be references. 193 AssumptionCache *AC; 194 TargetLibraryInfo *TLI; 195 DominatorTree *DT; 196 const DataLayout &DL; 197 198 // Optional analyses. When non-null, these can both be used to do better 199 // combining and will be updated to reflect any changes. 
  LoopInfo *LI;

  // Tracks whether any transformation changed the IR, so run() can report it.
  bool MadeIRChange;

public:
  InstCombiner(InstCombineWorklist &Worklist, BuilderTy *Builder,
               bool MinimizeSize, AssumptionCache *AC, TargetLibraryInfo *TLI,
               DominatorTree *DT, const DataLayout &DL, LoopInfo *LI)
      : Worklist(Worklist), Builder(Builder), MinimizeSize(MinimizeSize),
        AC(AC), TLI(TLI), DT(DT), DL(DL), LI(LI), MadeIRChange(false) {}

  /// \brief Run the combiner over the entire worklist until it is empty.
  ///
  /// \returns true if the IR is changed.
  bool run();

  AssumptionCache *getAssumptionCache() const { return AC; }

  const DataLayout &getDataLayout() const { return DL; }

  DominatorTree *getDominatorTree() const { return DT; }

  LoopInfo *getLoopInfo() const { return LI; }

  TargetLibraryInfo *getTargetLibraryInfo() const { return TLI; }

  // Visitation implementation - Implement instruction combining for different
  // instruction types.  The semantics are as follows:
  // Return Value:
  //    null        - No change was made
  //     I          - Change was made, I is still valid, I may be dead though
  //   otherwise    - Change was made, replace I with returned instruction
  //
  Instruction *visitAdd(BinaryOperator &I);
  Instruction *visitFAdd(BinaryOperator &I);
  Value *OptimizePointerDifference(Value *LHS, Value *RHS, Type *Ty);
  Instruction *visitSub(BinaryOperator &I);
  Instruction *visitFSub(BinaryOperator &I);
  Instruction *visitMul(BinaryOperator &I);
  Value *foldFMulConst(Instruction *FMulOrDiv, Constant *C,
                       Instruction *InsertBefore);
  Instruction *visitFMul(BinaryOperator &I);
  Instruction *visitURem(BinaryOperator &I);
  Instruction *visitSRem(BinaryOperator &I);
  Instruction *visitFRem(BinaryOperator &I);
  bool SimplifyDivRemOfSelect(BinaryOperator &I);
  Instruction *commonRemTransforms(BinaryOperator &I);
  Instruction *commonIRemTransforms(BinaryOperator &I);
  Instruction *commonDivTransforms(BinaryOperator &I);
  Instruction *commonIDivTransforms(BinaryOperator &I);
  Instruction *visitUDiv(BinaryOperator &I);
  Instruction *visitSDiv(BinaryOperator &I);
  Instruction *visitFDiv(BinaryOperator &I);
  Value *simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1, bool Inverted);
  Value *FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS);
  Value *FoldAndOfFCmps(FCmpInst *LHS, FCmpInst *RHS);
  Instruction *visitAnd(BinaryOperator &I);
  Value *FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS, Instruction *CxtI);
  Value *FoldOrOfFCmps(FCmpInst *LHS, FCmpInst *RHS);
  Instruction *FoldOrWithConstants(BinaryOperator &I, Value *Op, Value *A,
                                   Value *B, Value *C);
  Instruction *FoldXorWithConstants(BinaryOperator &I, Value *Op, Value *A,
                                    Value *B, Value *C);
  Instruction *visitOr(BinaryOperator &I);
  Instruction *visitXor(BinaryOperator &I);
  Instruction *visitShl(BinaryOperator &I);
  Instruction *visitAShr(BinaryOperator &I);
  Instruction *visitLShr(BinaryOperator &I);
  Instruction *commonShiftTransforms(BinaryOperator &I);
  Instruction *FoldFCmp_IntToFP_Cst(FCmpInst &I, Instruction *LHSI,
                                    Constant *RHSC);
  Instruction *FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
                                            GlobalVariable *GV, CmpInst &ICI,
                                            ConstantInt *AndCst = nullptr);
  Instruction *visitFCmpInst(FCmpInst &I);
  Instruction *visitICmpInst(ICmpInst &I);
  Instruction *visitICmpInstWithCastAndCast(ICmpInst &ICI);
  Instruction *visitICmpInstWithInstAndIntCst(ICmpInst &ICI, Instruction *LHS,
                                              ConstantInt *RHS);
  Instruction *FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
                              ConstantInt *DivRHS);
  Instruction *FoldICmpShrCst(ICmpInst &ICI, BinaryOperator *DivI,
                              ConstantInt *DivRHS);
  Instruction *FoldICmpCstShrCst(ICmpInst &I, Value *Op, Value *A,
                                 ConstantInt *CI1, ConstantInt *CI2);
  Instruction *FoldICmpCstShlCst(ICmpInst &I, Value *Op, Value *A,
                                 ConstantInt *CI1, ConstantInt *CI2);
  Instruction *FoldICmpAddOpCst(Instruction &ICI, Value *X, ConstantInt *CI,
                                ICmpInst::Predicate Pred);
  Instruction *FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
                           ICmpInst::Predicate Cond, Instruction &I);
  Instruction *FoldShiftByConstant(Value *Op0, Constant *Op1,
                                   BinaryOperator &I);
  Instruction *commonCastTransforms(CastInst &CI);
  Instruction *commonPointerCastTransforms(CastInst &CI);
  Instruction *visitTrunc(TruncInst &CI);
  Instruction *visitZExt(ZExtInst &CI);
  Instruction *visitSExt(SExtInst &CI);
  Instruction *visitFPTrunc(FPTruncInst &CI);
  Instruction *visitFPExt(CastInst &CI);
  Instruction *visitFPToUI(FPToUIInst &FI);
  Instruction *visitFPToSI(FPToSIInst &FI);
  Instruction *visitUIToFP(CastInst &CI);
  Instruction *visitSIToFP(CastInst &CI);
  Instruction *visitPtrToInt(PtrToIntInst &CI);
  Instruction *visitIntToPtr(IntToPtrInst &CI);
  Instruction *visitBitCast(BitCastInst &CI);
  Instruction *visitAddrSpaceCast(AddrSpaceCastInst &CI);
  Instruction *FoldSelectOpOp(SelectInst &SI, Instruction *TI, Instruction *FI);
  Instruction *FoldSelectIntoOp(SelectInst &SI, Value *, Value *);
  Instruction *FoldSPFofSPF(Instruction *Inner, SelectPatternFlavor SPF1,
                            Value *A, Value *B, Instruction &Outer,
                            SelectPatternFlavor SPF2, Value *C);
  Instruction *FoldItoFPtoI(Instruction &FI);
  Instruction *visitSelectInst(SelectInst &SI);
  Instruction *visitSelectInstWithICmp(SelectInst &SI, ICmpInst *ICI);
  Instruction *visitCallInst(CallInst &CI);
  Instruction *visitInvokeInst(InvokeInst &II);

  Instruction *SliceUpIllegalIntegerPHI(PHINode &PN);
  Instruction *visitPHINode(PHINode &PN);
  Instruction *visitGetElementPtrInst(GetElementPtrInst &GEP);
  Instruction *visitAllocaInst(AllocaInst &AI);
  Instruction *visitAllocSite(Instruction &FI);
  Instruction *visitFree(CallInst &FI);
  Instruction *visitLoadInst(LoadInst &LI);
  Instruction *visitStoreInst(StoreInst &SI);
  Instruction *visitBranchInst(BranchInst &BI);
  Instruction *visitSwitchInst(SwitchInst &SI);
  Instruction *visitReturnInst(ReturnInst &RI);
  Instruction *visitInsertValueInst(InsertValueInst &IV);
  Instruction *visitInsertElementInst(InsertElementInst &IE);
  Instruction *visitExtractElementInst(ExtractElementInst &EI);
  Instruction *visitShuffleVectorInst(ShuffleVectorInst &SVI);
  Instruction *visitExtractValueInst(ExtractValueInst &EV);
  Instruction *visitLandingPadInst(LandingPadInst &LI);

  // visitInstruction - Specify what to return for unhandled instructions...
  Instruction *visitInstruction(Instruction &I) { return nullptr; }

  // True when DB dominates all uses of DI except UI.
  // UI must be in the same block as DI.
  // The routine checks that the DI parent and DB are different.
  bool dominatesAllUses(const Instruction *DI, const Instruction *UI,
                        const BasicBlock *DB) const;

  // Replace select with select operand SIOpd in SI-ICmp sequence when possible.
  bool replacedSelectWithOperand(SelectInst *SI, const ICmpInst *Icmp,
                                 const unsigned SIOpd);

private:
  bool ShouldChangeType(Type *From, Type *To) const;
  Value *dyn_castNegVal(Value *V) const;
  Value *dyn_castFNegVal(Value *V, bool NoSignedZero = false) const;
  Type *FindElementAtOffset(PointerType *PtrTy, int64_t Offset,
                            SmallVectorImpl<Value *> &NewIndices);
  Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI);

  /// \brief Classify whether a cast is worth optimizing.
  ///
  /// Returns true if the cast from "V to Ty" actually results in any code
  /// being generated and is interesting to optimize out. If the cast can be
  /// eliminated by some other simple transformation, we prefer to do the
  /// simplification first.
364 bool ShouldOptimizeCast(Instruction::CastOps opcode, const Value *V, 365 Type *Ty); 366 367 /// \brief Try to optimize a sequence of instructions checking if an operation 368 /// on LHS and RHS overflows. 369 /// 370 /// If a simplification is possible, stores the simplified result of the 371 /// operation in OperationResult and result of the overflow check in 372 /// OverflowResult, and return true. If no simplification is possible, 373 /// returns false. 374 bool OptimizeOverflowCheck(OverflowCheckFlavor OCF, Value *LHS, Value *RHS, 375 Instruction &CtxI, Value *&OperationResult, 376 Constant *&OverflowResult); 377 378 Instruction *visitCallSite(CallSite CS); 379 Instruction *tryOptimizeCall(CallInst *CI); 380 bool transformConstExprCastCall(CallSite CS); 381 Instruction *transformCallThroughTrampoline(CallSite CS, 382 IntrinsicInst *Tramp); 383 Instruction *transformZExtICmp(ICmpInst *ICI, Instruction &CI, 384 bool DoXform = true); 385 Instruction *transformSExtICmp(ICmpInst *ICI, Instruction &CI); 386 bool WillNotOverflowSignedAdd(Value *LHS, Value *RHS, Instruction &CxtI); 387 bool WillNotOverflowSignedSub(Value *LHS, Value *RHS, Instruction &CxtI); 388 bool WillNotOverflowUnsignedSub(Value *LHS, Value *RHS, Instruction &CxtI); 389 bool WillNotOverflowSignedMul(Value *LHS, Value *RHS, Instruction &CxtI); 390 Value *EmitGEPOffset(User *GEP); 391 Instruction *scalarizePHI(ExtractElementInst &EI, PHINode *PN); 392 Value *EvaluateInDifferentElementOrder(Value *V, ArrayRef<int> Mask); 393 394public: 395 /// \brief Inserts an instruction \p New before instruction \p Old 396 /// 397 /// Also adds the new instruction to the worklist and returns \p New so that 398 /// it is suitable for use as the return from the visitation patterns. 
399 Instruction *InsertNewInstBefore(Instruction *New, Instruction &Old) { 400 assert(New && !New->getParent() && 401 "New instruction already inserted into a basic block!"); 402 BasicBlock *BB = Old.getParent(); 403 BB->getInstList().insert(&Old, New); // Insert inst 404 Worklist.Add(New); 405 return New; 406 } 407 408 /// \brief Same as InsertNewInstBefore, but also sets the debug loc. 409 Instruction *InsertNewInstWith(Instruction *New, Instruction &Old) { 410 New->setDebugLoc(Old.getDebugLoc()); 411 return InsertNewInstBefore(New, Old); 412 } 413 414 /// \brief A combiner-aware RAUW-like routine. 415 /// 416 /// This method is to be used when an instruction is found to be dead, 417 /// replacable with another preexisting expression. Here we add all uses of 418 /// I to the worklist, replace all uses of I with the new value, then return 419 /// I, so that the inst combiner will know that I was modified. 420 Instruction *ReplaceInstUsesWith(Instruction &I, Value *V) { 421 // If there are no uses to replace, then we return nullptr to indicate that 422 // no changes were made to the program. 423 if (I.use_empty()) return nullptr; 424 425 Worklist.AddUsersToWorkList(I); // Add all modified instrs to worklist. 426 427 // If we are replacing the instruction with itself, this must be in a 428 // segment of unreachable code, so just clobber the instruction. 429 if (&I == V) 430 V = UndefValue::get(I.getType()); 431 432 DEBUG(dbgs() << "IC: Replacing " << I << "\n" 433 << " with " << *V << '\n'); 434 435 I.replaceAllUsesWith(V); 436 return &I; 437 } 438 439 /// Creates a result tuple for an overflow intrinsic \p II with a given 440 /// \p Result and a constant \p Overflow value. 
441 Instruction *CreateOverflowTuple(IntrinsicInst *II, Value *Result, 442 Constant *Overflow) { 443 Constant *V[] = {UndefValue::get(Result->getType()), Overflow}; 444 StructType *ST = cast<StructType>(II->getType()); 445 Constant *Struct = ConstantStruct::get(ST, V); 446 return InsertValueInst::Create(Struct, Result, 0); 447 } 448 449 /// \brief Combiner aware instruction erasure. 450 /// 451 /// When dealing with an instruction that has side effects or produces a void 452 /// value, we can't rely on DCE to delete the instruction. Instead, visit 453 /// methods should return the value returned by this function. 454 Instruction *EraseInstFromFunction(Instruction &I) { 455 DEBUG(dbgs() << "IC: ERASE " << I << '\n'); 456 457 assert(I.use_empty() && "Cannot erase instruction that is used!"); 458 // Make sure that we reprocess all operands now that we reduced their 459 // use counts. 460 if (I.getNumOperands() < 8) { 461 for (User::op_iterator i = I.op_begin(), e = I.op_end(); i != e; ++i) 462 if (Instruction *Op = dyn_cast<Instruction>(*i)) 463 Worklist.Add(Op); 464 } 465 Worklist.Remove(&I); 466 I.eraseFromParent(); 467 MadeIRChange = true; 468 return nullptr; // Don't do anything with FI 469 } 470 471 void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne, 472 unsigned Depth, Instruction *CxtI) const { 473 return llvm::computeKnownBits(V, KnownZero, KnownOne, DL, Depth, AC, CxtI, 474 DT); 475 } 476 477 bool MaskedValueIsZero(Value *V, const APInt &Mask, unsigned Depth = 0, 478 Instruction *CxtI = nullptr) const { 479 return llvm::MaskedValueIsZero(V, Mask, DL, Depth, AC, CxtI, DT); 480 } 481 unsigned ComputeNumSignBits(Value *Op, unsigned Depth = 0, 482 Instruction *CxtI = nullptr) const { 483 return llvm::ComputeNumSignBits(Op, DL, Depth, AC, CxtI, DT); 484 } 485 void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne, 486 unsigned Depth = 0, Instruction *CxtI = nullptr) const { 487 return llvm::ComputeSignBit(V, KnownZero, KnownOne, DL, Depth, 
AC, CxtI, 488 DT); 489 } 490 OverflowResult computeOverflowForUnsignedMul(Value *LHS, Value *RHS, 491 const Instruction *CxtI) { 492 return llvm::computeOverflowForUnsignedMul(LHS, RHS, DL, AC, CxtI, DT); 493 } 494 OverflowResult computeOverflowForUnsignedAdd(Value *LHS, Value *RHS, 495 const Instruction *CxtI) { 496 return llvm::computeOverflowForUnsignedAdd(LHS, RHS, DL, AC, CxtI, DT); 497 } 498 499private: 500 /// \brief Performs a few simplifications for operators which are associative 501 /// or commutative. 502 bool SimplifyAssociativeOrCommutative(BinaryOperator &I); 503 504 /// \brief Tries to simplify binary operations which some other binary 505 /// operation distributes over. 506 /// 507 /// It does this by either by factorizing out common terms (eg "(A*B)+(A*C)" 508 /// -> "A*(B+C)") or expanding out if this results in simplifications (eg: "A 509 /// & (B | C) -> (A&B) | (A&C)" if this is a win). Returns the simplified 510 /// value, or null if it didn't simplify. 511 Value *SimplifyUsingDistributiveLaws(BinaryOperator &I); 512 513 /// \brief Attempts to replace V with a simpler value based on the demanded 514 /// bits. 515 Value *SimplifyDemandedUseBits(Value *V, APInt DemandedMask, APInt &KnownZero, 516 APInt &KnownOne, unsigned Depth, 517 Instruction *CxtI); 518 bool SimplifyDemandedBits(Use &U, APInt DemandedMask, APInt &KnownZero, 519 APInt &KnownOne, unsigned Depth = 0); 520 /// Helper routine of SimplifyDemandedUseBits. It tries to simplify demanded 521 /// bit for "r1 = shr x, c1; r2 = shl r1, c2" instruction sequence. 522 Value *SimplifyShrShlDemandedBits(Instruction *Lsr, Instruction *Sftl, 523 APInt DemandedMask, APInt &KnownZero, 524 APInt &KnownOne); 525 526 /// \brief Tries to simplify operands to an integer instruction based on its 527 /// demanded bits. 
  bool SimplifyDemandedInstructionBits(Instruction &Inst);

  Value *SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
                                    APInt &UndefElts, unsigned Depth = 0);

  Value *SimplifyVectorOp(BinaryOperator &Inst);
  Value *SimplifyBSwap(BinaryOperator &Inst);

  // FoldOpIntoPhi - Given a binary operator, cast instruction, or select
  // which has a PHI node as operand #0, see if we can fold the instruction
  // into the PHI (which is only possible if all operands to the PHI are
  // constants).
  //
  Instruction *FoldOpIntoPhi(Instruction &I);

  /// \brief Try to rotate an operation below a PHI node, using PHI nodes for
  /// its operands.
  Instruction *FoldPHIArgOpIntoPHI(PHINode &PN);
  Instruction *FoldPHIArgBinOpIntoPHI(PHINode &PN);
  Instruction *FoldPHIArgGEPIntoPHI(PHINode &PN);
  Instruction *FoldPHIArgLoadIntoPHI(PHINode &PN);

  Instruction *OptAndOp(Instruction *Op, ConstantInt *OpRHS,
                        ConstantInt *AndRHS, BinaryOperator &TheAnd);

  Value *FoldLogicalPlusAnd(Value *LHS, Value *RHS, ConstantInt *Mask,
                            bool isSub, Instruction &I);
  Value *InsertRangeTest(Value *V, Constant *Lo, Constant *Hi, bool isSigned,
                         bool Inside);
  Instruction *PromoteCastOfAllocation(BitCastInst &CI, AllocaInst &AI);
  Instruction *MatchBSwap(BinaryOperator &I);
  bool SimplifyStoreAtEndOfBlock(StoreInst &SI);
  Instruction *SimplifyMemTransfer(MemIntrinsic *MI);
  Instruction *SimplifyMemSet(MemSetInst *MI);

  Value *EvaluateInDifferentType(Value *V, Type *Ty, bool isSigned);

  /// \brief Returns a value X such that Val = X * Scale, or null if none.
  ///
  /// If the multiplication is known not to overflow then NoSignedWrap is set.
  Value *Descale(Value *Val, APInt Scale, bool &NoSignedWrap);
};

} // end namespace llvm.

#undef DEBUG_TYPE

#endif