InstructionCombining.cpp revision 165dac08d1bb8428b32a5f39cdd3dbee2888987f
//===- InstructionCombining.cpp - Combine multiple instructions -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// InstructionCombining - Combine instructions to form fewer, simple
// instructions. This pass does not modify the CFG. This pass is where
// algebraic simplification happens.
//
// This pass combines things like:
//    %Y = add i32 %X, 1
//    %Z = add i32 %Y, 1
// into:
//    %Z = add i32 %X, 2
//
// This is a simple worklist driven algorithm.
//
// This pass guarantees that the following canonicalizations are performed on
// the program:
//    1. If a binary operator has a constant operand, it is moved to the RHS
//    2. Bitwise operators with constant operands are always grouped so that
//       shifts are performed first, then or's, then and's, then xor's.
//    3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
//    4. All cmp instructions on boolean values are replaced with logical ops
//    5. add X, X is represented as (X*2) => (X << 1)
//    6. Multiplies with a power-of-two constant argument are transformed into
//       shifts.
//   ... etc.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Scalar.h"
#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include <algorithm>
#include <climits>
using namespace llvm;
using namespace llvm::PatternMatch;

STATISTIC(NumCombined , "Number of insts combined");
STATISTIC(NumConstProp, "Number of constant folds");
STATISTIC(NumDeadInst , "Number of dead inst eliminated");
STATISTIC(NumSunkInst , "Number of instructions sunk");


char InstCombiner::ID = 0;
static RegisterPass<InstCombiner>
X("instcombine", "Combine redundant instructions");

void InstCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addPreservedID(LCSSAID);
  AU.setPreservesCFG();
}


/// ShouldChangeType - Return true if it is desirable to convert a computation
/// from 'From' to 'To'. We don't want to convert from a legal to an illegal
/// type for example, or from a smaller to a larger illegal type.
bool InstCombiner::ShouldChangeType(const Type *From, const Type *To) const {
  assert(From->isIntegerTy() && To->isIntegerTy());

  // If we don't have TD, we don't know if the source/dest are legal.
  if (!TD) return false;

  unsigned FromWidth = From->getPrimitiveSizeInBits();
  unsigned ToWidth = To->getPrimitiveSizeInBits();
  bool FromLegal = TD->isLegalInteger(FromWidth);
  bool ToLegal = TD->isLegalInteger(ToWidth);

  // If this is a legal integer from type, and the result would be an illegal
  // type, don't do the transformation.
  if (FromLegal && !ToLegal)
    return false;

  // Otherwise, if both are illegal, do not increase the size of the result. We
  // do allow things like i160 -> i64, but not i64 -> i160.
  if (!FromLegal && !ToLegal && ToWidth > FromWidth)
    return false;

  return true;
}

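// Illustrative behavior (not from the original source), assuming a
// TargetData where i64 is a legal ("native") integer type and i33, i39,
// and i160 are not:
//
//   ShouldChangeType(i33,  i64)  --> true   (illegal -> legal)
//   ShouldChangeType(i64,  i160) --> false  (legal -> illegal)
//   ShouldChangeType(i160, i64)  --> true   (shrinking an illegal type)
//   ShouldChangeType(i33,  i39)  --> false  (growing an illegal type)
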
// SimplifyCommutative - This performs a few simplifications for commutative
// operators:
//
//  1. Order operands such that they are listed from right (least complex) to
//     left (most complex). This puts constants before unary operators before
//     binary operators.
//
//  2. Transform: (op (op V, C1), C2) ==> (op V, (op C1, C2))
//  3. Transform: (op (op V1, C1), (op V2, C2)) ==> (op (op V1, V2), (op C1,C2))
//
bool InstCombiner::SimplifyCommutative(BinaryOperator &I) {
  bool Changed = false;
  if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1)))
    Changed = !I.swapOperands();

  if (!I.isAssociative()) return Changed;

  Instruction::BinaryOps Opcode = I.getOpcode();
  if (BinaryOperator *Op = dyn_cast<BinaryOperator>(I.getOperand(0)))
    if (Op->getOpcode() == Opcode && isa<Constant>(Op->getOperand(1))) {
      if (isa<Constant>(I.getOperand(1))) {
        Constant *Folded = ConstantExpr::get(I.getOpcode(),
                                             cast<Constant>(I.getOperand(1)),
                                             cast<Constant>(Op->getOperand(1)));
        I.setOperand(0, Op->getOperand(0));
        I.setOperand(1, Folded);
        return true;
      }

      if (BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1)))
        if (Op1->getOpcode() == Opcode && isa<Constant>(Op1->getOperand(1)) &&
            Op->hasOneUse() && Op1->hasOneUse()) {
          Constant *C1 = cast<Constant>(Op->getOperand(1));
          Constant *C2 = cast<Constant>(Op1->getOperand(1));

          // Fold (op (op V1, C1), (op V2, C2)) ==> (op (op V1, V2), (op C1,C2))
          Constant *Folded = ConstantExpr::get(I.getOpcode(), C1, C2);
          Instruction *New = BinaryOperator::Create(Opcode, Op->getOperand(0),
                                                    Op1->getOperand(0),
                                                    Op1->getName(), &I);
          Worklist.Add(New);
          I.setOperand(0, New);
          I.setOperand(1, Folded);
          return true;
        }
    }
  return Changed;
}

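// For example (illustrative IR, not from the original source), rule 2
// reassociates a chain with two constants:
//
//   %a = mul i32 %x, 5
//   %b = mul i32 %a, 3
// becomes:
//   %b = mul i32 %x, 15
//
// and rule 3 merges two single-use subexpressions:
//
//   %a = mul i32 %x, 5
//   %b = mul i32 %y, 3
//   %c = mul i32 %a, %b
// becomes:
//   %t = mul i32 %x, %y
//   %c = mul i32 %t, 15
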
// dyn_castNegVal - Given a 'sub' instruction, return the RHS of the instruction
// if the LHS is a constant zero (which is the 'negate' form).
//
Value *InstCombiner::dyn_castNegVal(Value *V) const {
  if (BinaryOperator::isNeg(V))
    return BinaryOperator::getNegArgument(V);

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantInt *C = dyn_cast<ConstantInt>(V))
    return ConstantExpr::getNeg(C);

  if (ConstantVector *C = dyn_cast<ConstantVector>(V))
    if (C->getType()->getElementType()->isIntegerTy())
      return ConstantExpr::getNeg(C);

  return 0;
}

// dyn_castFNegVal - Given an 'fsub' instruction, return the RHS of the
// instruction if the LHS is a constant negative zero (which is the 'negate'
// form).
//
Value *InstCombiner::dyn_castFNegVal(Value *V) const {
  if (BinaryOperator::isFNeg(V))
    return BinaryOperator::getFNegArgument(V);

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantFP *C = dyn_cast<ConstantFP>(V))
    return ConstantExpr::getFNeg(C);

  if (ConstantVector *C = dyn_cast<ConstantVector>(V))
    if (C->getType()->getElementType()->isFloatingPointTy())
      return ConstantExpr::getFNeg(C);

  return 0;
}

static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,
                                             InstCombiner *IC) {
  if (CastInst *CI = dyn_cast<CastInst>(&I))
    return IC->Builder->CreateCast(CI->getOpcode(), SO, I.getType());

  // Figure out if the constant is the left or the right argument.
  bool ConstIsRHS = isa<Constant>(I.getOperand(1));
  Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS));

  if (Constant *SOC = dyn_cast<Constant>(SO)) {
    if (ConstIsRHS)
      return ConstantExpr::get(I.getOpcode(), SOC, ConstOperand);
    return ConstantExpr::get(I.getOpcode(), ConstOperand, SOC);
  }

  Value *Op0 = SO, *Op1 = ConstOperand;
  if (!ConstIsRHS)
    std::swap(Op0, Op1);

  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
    return IC->Builder->CreateBinOp(BO->getOpcode(), Op0, Op1,
                                    SO->getName()+".op");
  if (ICmpInst *CI = dyn_cast<ICmpInst>(&I))
    return IC->Builder->CreateICmp(CI->getPredicate(), Op0, Op1,
                                   SO->getName()+".cmp");
  // Use CreateFCmp here: an FCmpInst carries a floating-point predicate,
  // which would be invalid on an icmp.
  if (FCmpInst *CI = dyn_cast<FCmpInst>(&I))
    return IC->Builder->CreateFCmp(CI->getPredicate(), Op0, Op1,
                                   SO->getName()+".cmp");
  llvm_unreachable("Unknown binary instruction type!");
}

// FoldOpIntoSelect - Given an instruction with a select as one operand and a
// constant as the other operand, try to fold the binary operator into the
// select arguments. This also works for Cast instructions, which obviously do
// not have a second operand.
Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
  // Don't modify shared select instructions.
  if (!SI->hasOneUse()) return 0;
  Value *TV = SI->getOperand(1);
  Value *FV = SI->getOperand(2);

  if (isa<Constant>(TV) || isa<Constant>(FV)) {
    // Bool selects with constant operands can be folded to logical ops.
    if (SI->getType()->isIntegerTy(1)) return 0;

    Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, this);
    Value *SelectFalseVal = FoldOperationIntoSelectOperand(Op, FV, this);

    return SelectInst::Create(SI->getCondition(), SelectTrueVal,
                              SelectFalseVal);
  }
  return 0;
}

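// For example (illustrative IR, not from the original source), when the
// select has one use and both arms are constants:
//
//   %s = select i1 %c, i32 4, i32 8
//   %r = mul i32 %s, 3
// becomes:
//   %r = select i1 %c, i32 12, i32 24
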

/// FoldOpIntoPhi - Given a binary operator, cast instruction, or select which
/// has a PHI node as operand #0, see if we can fold the instruction into the
/// PHI (which is only possible if all operands to the PHI are constants).
///
/// If AllowAggressive is true, FoldOpIntoPhi will allow certain transforms
/// that would normally be unprofitable because they strongly encourage jump
/// threading.
Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I,
                                         bool AllowAggressive) {
  AllowAggressive = false;
  PHINode *PN = cast<PHINode>(I.getOperand(0));
  unsigned NumPHIValues = PN->getNumIncomingValues();
  if (NumPHIValues == 0 ||
      // We normally only transform phis with a single use, unless we're trying
      // hard to make jump threading happen.
      (!PN->hasOneUse() && !AllowAggressive))
    return 0;


  // Check to see if all of the operands of the PHI are simple constants
  // (constantint/constantfp/undef). If there is one non-constant value,
  // remember the BB it is in. If there is more than one or if *it* is a PHI,
  // bail out. We don't do arbitrary constant expressions here because moving
  // their computation can be expensive without a cost model.
  BasicBlock *NonConstBB = 0;
  for (unsigned i = 0; i != NumPHIValues; ++i)
    if (!isa<Constant>(PN->getIncomingValue(i)) ||
        isa<ConstantExpr>(PN->getIncomingValue(i))) {
      if (NonConstBB) return 0;  // More than one non-const value.
      if (isa<PHINode>(PN->getIncomingValue(i))) return 0;  // Itself a phi.
      NonConstBB = PN->getIncomingBlock(i);

      // If the incoming non-constant value is in I's block, we have an
      // infinite loop.
      if (NonConstBB == I.getParent())
        return 0;
    }

  // If there is exactly one non-constant value, we can insert a copy of the
  // operation in that block. However, if this is a critical edge, we would be
  // inserting the computation on some other paths (e.g. inside a loop). Only
  // do this if the pred block is unconditionally branching into the phi block.
  if (NonConstBB != 0 && !AllowAggressive) {
    BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator());
    if (!BI || !BI->isUnconditional()) return 0;
  }

  // Okay, we can do the transformation: create the new PHI node.
  PHINode *NewPN = PHINode::Create(I.getType(), "");
  NewPN->reserveOperandSpace(PN->getNumOperands()/2);
  InsertNewInstBefore(NewPN, *PN);
  NewPN->takeName(PN);

  // Next, add all of the operands to the PHI.
  if (SelectInst *SI = dyn_cast<SelectInst>(&I)) {
    // We only currently try to fold the condition of a select when it is a phi,
    // not the true/false values.
    Value *TrueV = SI->getTrueValue();
    Value *FalseV = SI->getFalseValue();
    BasicBlock *PhiTransBB = PN->getParent();
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      BasicBlock *ThisBB = PN->getIncomingBlock(i);
      Value *TrueVInPred = TrueV->DoPHITranslation(PhiTransBB, ThisBB);
      Value *FalseVInPred = FalseV->DoPHITranslation(PhiTransBB, ThisBB);
      Value *InV = 0;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
        InV = InC->isNullValue() ? FalseVInPred : TrueVInPred;
      } else {
        assert(PN->getIncomingBlock(i) == NonConstBB);
        InV = SelectInst::Create(PN->getIncomingValue(i), TrueVInPred,
                                 FalseVInPred,
                                 "phitmp", NonConstBB->getTerminator());
        Worklist.Add(cast<Instruction>(InV));
      }
      NewPN->addIncoming(InV, ThisBB);
    }
  } else if (I.getNumOperands() == 2) {
    Constant *C = cast<Constant>(I.getOperand(1));
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV = 0;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
        if (CmpInst *CI = dyn_cast<CmpInst>(&I))
          InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C);
        else
          InV = ConstantExpr::get(I.getOpcode(), InC, C);
      } else {
        assert(PN->getIncomingBlock(i) == NonConstBB);
        if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
          InV = BinaryOperator::Create(BO->getOpcode(),
                                       PN->getIncomingValue(i), C, "phitmp",
                                       NonConstBB->getTerminator());
        else if (CmpInst *CI = dyn_cast<CmpInst>(&I))
          InV = CmpInst::Create(CI->getOpcode(),
                                CI->getPredicate(),
                                PN->getIncomingValue(i), C, "phitmp",
                                NonConstBB->getTerminator());
        else
          llvm_unreachable("Unknown binop!");

        Worklist.Add(cast<Instruction>(InV));
      }
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  } else {
    CastInst *CI = cast<CastInst>(&I);
    const Type *RetTy = CI->getType();
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
        InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy);
      } else {
        assert(PN->getIncomingBlock(i) == NonConstBB);
        InV = CastInst::Create(CI->getOpcode(), PN->getIncomingValue(i),
                               I.getType(), "phitmp",
                               NonConstBB->getTerminator());
        Worklist.Add(cast<Instruction>(InV));
      }
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  }
  return ReplaceInstUsesWith(I, NewPN);
}

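// For example (illustrative IR, not from the original source), when the phi
// has one use and all-constant incoming values:
//
//   %p = phi i32 [ 1, %bb1 ], [ 2, %bb2 ]
//   %r = add i32 %p, 10
// becomes:
//   %r = phi i32 [ 11, %bb1 ], [ 12, %bb2 ]
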
/// FindElementAtOffset - Given a type and a constant offset, determine whether
/// or not there is a sequence of GEP indices into the type that will land us at
/// the specified offset. If so, fill them into NewIndices and return the
/// resultant element type, otherwise return null.
const Type *InstCombiner::FindElementAtOffset(const Type *Ty, int64_t Offset,
                                          SmallVectorImpl<Value*> &NewIndices) {
  if (!TD) return 0;
  if (!Ty->isSized()) return 0;

  // Start with the index over the outer type. Note that the type size
  // might be zero (even if the offset isn't zero) if the indexed type
  // is something like [0 x {int, int}]
  const Type *IntPtrTy = TD->getIntPtrType(Ty->getContext());
  int64_t FirstIdx = 0;
  if (int64_t TySize = TD->getTypeAllocSize(Ty)) {
    FirstIdx = Offset/TySize;
    Offset -= FirstIdx*TySize;

    // Handle hosts where % returns negative instead of values [0..TySize).
    if (Offset < 0) {
      --FirstIdx;
      Offset += TySize;
      assert(Offset >= 0);
    }
    assert((uint64_t)Offset < (uint64_t)TySize && "Out of range offset");
  }

  NewIndices.push_back(ConstantInt::get(IntPtrTy, FirstIdx));

  // Index into the types. If we fail, set OrigBase to null.
  while (Offset) {
    // Indexing into tail padding between struct/array elements.
    if (uint64_t(Offset*8) >= TD->getTypeSizeInBits(Ty))
      return 0;

    if (const StructType *STy = dyn_cast<StructType>(Ty)) {
      const StructLayout *SL = TD->getStructLayout(STy);
      assert(Offset < (int64_t)SL->getSizeInBytes() &&
             "Offset must stay within the indexed type");

      unsigned Elt = SL->getElementContainingOffset(Offset);
      NewIndices.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
                                            Elt));

      Offset -= SL->getElementOffset(Elt);
      Ty = STy->getElementType(Elt);
    } else if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
      uint64_t EltSize = TD->getTypeAllocSize(AT->getElementType());
      assert(EltSize && "Cannot index into a zero-sized array");
      NewIndices.push_back(ConstantInt::get(IntPtrTy,Offset/EltSize));
      Offset %= EltSize;
      Ty = AT->getElementType();
    } else {
      // Otherwise, we can't index into the middle of this atomic type, bail.
      return 0;
    }
  }

  return Ty;
}

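// Worked example (illustrative, not from the original source): for
// Ty = { i32, [4 x i16] } and Offset = 10, with the usual struct layout,
// the function pushes the indices [0, 1, 3] (outer index 0, field 1 at
// byte 4, array element 3 at byte 4 + 3*2 = 10) and returns i16.
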


Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
  SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());

  if (Value *V = SimplifyGEPInst(&Ops[0], Ops.size(), TD))
    return ReplaceInstUsesWith(GEP, V);

  Value *PtrOp = GEP.getOperand(0);

  if (isa<UndefValue>(GEP.getOperand(0)))
    return ReplaceInstUsesWith(GEP, UndefValue::get(GEP.getType()));

  // Eliminate unneeded casts for indices.
  if (TD) {
    bool MadeChange = false;
    unsigned PtrSize = TD->getPointerSizeInBits();

    gep_type_iterator GTI = gep_type_begin(GEP);
    for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();
         I != E; ++I, ++GTI) {
      if (!isa<SequentialType>(*GTI)) continue;

      // If we are using a wider index than needed for this platform, shrink it
      // to what we need. If narrower, sign-extend it to what we need. This
      // explicit cast can make subsequent optimizations more obvious.
      unsigned OpBits = cast<IntegerType>((*I)->getType())->getBitWidth();
      if (OpBits == PtrSize)
        continue;

      *I = Builder->CreateIntCast(*I, TD->getIntPtrType(GEP.getContext()),true);
      MadeChange = true;
    }
    if (MadeChange) return &GEP;
  }

  // Combine Indices - If the source pointer to this getelementptr instruction
  // is a getelementptr instruction, combine the indices of the two
  // getelementptr instructions into a single instruction.
  //
  if (GEPOperator *Src = dyn_cast<GEPOperator>(PtrOp)) {
    // Note that if our source is a gep chain itself that we wait for that
    // chain to be resolved before we perform this transformation. This
    // avoids us creating a TON of code in some cases.
    //
    if (GetElementPtrInst *SrcGEP =
          dyn_cast<GetElementPtrInst>(Src->getOperand(0)))
      if (SrcGEP->getNumOperands() == 2)
        return 0;   // Wait until our source is folded to completion.

    SmallVector<Value*, 8> Indices;

    // Find out whether the last index in the source GEP is a sequential idx.
    bool EndsWithSequential = false;
    for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
         I != E; ++I)
      EndsWithSequential = !(*I)->isStructTy();

    // Can we combine the two pointer arithmetics offsets?
    if (EndsWithSequential) {
      // Replace: gep (gep %P, long B), long A, ...
      // With:    T = long A+B; gep %P, T, ...
      //
      Value *Sum;
      Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
      Value *GO1 = GEP.getOperand(1);
      if (SO1 == Constant::getNullValue(SO1->getType())) {
        Sum = GO1;
      } else if (GO1 == Constant::getNullValue(GO1->getType())) {
        Sum = SO1;
      } else {
        // If they aren't the same type, then the input hasn't been processed
        // by the loop above yet (which canonicalizes sequential index types to
        // intptr_t). Just avoid transforming this until the input has been
        // normalized.
        if (SO1->getType() != GO1->getType())
          return 0;
        Sum = Builder->CreateAdd(SO1, GO1, PtrOp->getName()+".sum");
      }

      // Update the GEP in place if possible.
      if (Src->getNumOperands() == 2) {
        GEP.setOperand(0, Src->getOperand(0));
        GEP.setOperand(1, Sum);
        return &GEP;
      }
      Indices.append(Src->op_begin()+1, Src->op_end()-1);
      Indices.push_back(Sum);
      Indices.append(GEP.op_begin()+2, GEP.op_end());
    } else if (isa<Constant>(*GEP.idx_begin()) &&
               cast<Constant>(*GEP.idx_begin())->isNullValue() &&
               Src->getNumOperands() != 1) {
      // Otherwise we can do the fold if the first index of the GEP is a zero
      Indices.append(Src->op_begin()+1, Src->op_end());
      Indices.append(GEP.idx_begin()+1, GEP.idx_end());
    }

    if (!Indices.empty())
      return (GEP.isInBounds() && Src->isInBounds()) ?
        GetElementPtrInst::CreateInBounds(Src->getOperand(0), Indices.begin(),
                                          Indices.end(), GEP.getName()) :
        GetElementPtrInst::Create(Src->getOperand(0), Indices.begin(),
                                  Indices.end(), GEP.getName());
  }

  // Handle gep(bitcast x) and gep(gep x, 0, 0, 0).
  Value *StrippedPtr = PtrOp->stripPointerCasts();
  if (StrippedPtr != PtrOp) {
    const PointerType *StrippedPtrTy =cast<PointerType>(StrippedPtr->getType());

    bool HasZeroPointerIndex = false;
    if (ConstantInt *C = dyn_cast<ConstantInt>(GEP.getOperand(1)))
      HasZeroPointerIndex = C->isZero();

    // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ...
    // into     : GEP [10 x i8]* X, i32 0, ...
    //
    // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ...
    //           into     : GEP i8* X, ...
    //
    // This occurs when the program declares an array extern like "int X[];"
    if (HasZeroPointerIndex) {
      const PointerType *CPTy = cast<PointerType>(PtrOp->getType());
      if (const ArrayType *CATy =
          dyn_cast<ArrayType>(CPTy->getElementType())) {
        // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ?
        if (CATy->getElementType() == StrippedPtrTy->getElementType()) {
          // -> GEP i8* X, ...
          SmallVector<Value*, 8> Idx(GEP.idx_begin()+1, GEP.idx_end());
          GetElementPtrInst *Res =
            GetElementPtrInst::Create(StrippedPtr, Idx.begin(),
                                      Idx.end(), GEP.getName());
          Res->setIsInBounds(GEP.isInBounds());
          return Res;
        }

        if (const ArrayType *XATy =
              dyn_cast<ArrayType>(StrippedPtrTy->getElementType())){
          // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ?
          if (CATy->getElementType() == XATy->getElementType()) {
            // -> GEP [10 x i8]* X, i32 0, ...
            // At this point, we know that the cast source type is a pointer
            // to an array of the same type as the destination pointer
            // array. Because the array type is never stepped over (there
            // is a leading zero) we can fold the cast into this GEP.
            GEP.setOperand(0, StrippedPtr);
            return &GEP;
          }
        }
      }
    } else if (GEP.getNumOperands() == 2) {
      // Transform things like:
      // %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V
      // into:  %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
      const Type *SrcElTy = StrippedPtrTy->getElementType();
      const Type *ResElTy=cast<PointerType>(PtrOp->getType())->getElementType();
      if (TD && SrcElTy->isArrayTy() &&
          TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
          TD->getTypeAllocSize(ResElTy)) {
        Value *Idx[2];
        Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP.getContext()));
        Idx[1] = GEP.getOperand(1);
        Value *NewGEP = GEP.isInBounds() ?
          Builder->CreateInBoundsGEP(StrippedPtr, Idx, Idx + 2, GEP.getName()) :
          Builder->CreateGEP(StrippedPtr, Idx, Idx + 2, GEP.getName());
        // V and GEP are both pointer types --> BitCast
        return new BitCastInst(NewGEP, GEP.getType());
      }

      // Transform things like:
      // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
      //   (where tmp = 8*tmp2) into:
      // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast

      if (TD && SrcElTy->isArrayTy() && ResElTy->isIntegerTy(8)) {
        uint64_t ArrayEltSize =
            TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType());

        // Check to see if "tmp" is a scale by a multiple of ArrayEltSize. We
        // allow either a mul, shift, or constant here.
        Value *NewIdx = 0;
        ConstantInt *Scale = 0;
        if (ArrayEltSize == 1) {
          NewIdx = GEP.getOperand(1);
          Scale = ConstantInt::get(cast<IntegerType>(NewIdx->getType()), 1);
        } else if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP.getOperand(1))) {
          NewIdx = ConstantInt::get(CI->getType(), 1);
          Scale = CI;
        } else if (Instruction *Inst =dyn_cast<Instruction>(GEP.getOperand(1))){
          if (Inst->getOpcode() == Instruction::Shl &&
              isa<ConstantInt>(Inst->getOperand(1))) {
            ConstantInt *ShAmt = cast<ConstantInt>(Inst->getOperand(1));
            uint32_t ShAmtVal = ShAmt->getLimitedValue(64);
            Scale = ConstantInt::get(cast<IntegerType>(Inst->getType()),
                                     1ULL << ShAmtVal);
            NewIdx = Inst->getOperand(0);
          } else if (Inst->getOpcode() == Instruction::Mul &&
                     isa<ConstantInt>(Inst->getOperand(1))) {
            Scale = cast<ConstantInt>(Inst->getOperand(1));
            NewIdx = Inst->getOperand(0);
          }
        }

        // If the index will be to exactly the right offset with the scale taken
        // out, perform the transformation. Note, we don't know whether Scale is
        // signed or not. We'll use the unsigned version of the division/modulo
        // operation after making sure Scale doesn't have the sign bit set.
        if (ArrayEltSize && Scale && Scale->getSExtValue() >= 0LL &&
            Scale->getZExtValue() % ArrayEltSize == 0) {
          Scale = ConstantInt::get(Scale->getType(),
                                   Scale->getZExtValue() / ArrayEltSize);
          if (Scale->getZExtValue() != 1) {
            Constant *C = ConstantExpr::getIntegerCast(Scale, NewIdx->getType(),
                                                       false /*ZExt*/);
            NewIdx = Builder->CreateMul(NewIdx, C, "idxscale");
          }

          // Insert the new GEP instruction.
          Value *Idx[2];
          Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP.getContext()));
          Idx[1] = NewIdx;
          Value *NewGEP = GEP.isInBounds() ?
            Builder->CreateInBoundsGEP(StrippedPtr, Idx, Idx + 2,GEP.getName()):
            Builder->CreateGEP(StrippedPtr, Idx, Idx + 2, GEP.getName());
          // The NewGEP must be pointer typed, so must the old one -> BitCast
          return new BitCastInst(NewGEP, GEP.getType());
        }
      }
    }
  }

  /// See if we can simplify:
  ///   X = bitcast A* to B*
  ///   Y = gep X, <...constant indices...>
  /// into a gep of the original struct. This is important for SROA and alias
  /// analysis of unions. If "A" is also a bitcast, wait for A/X to be merged.
  if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) {
    if (TD &&
        !isa<BitCastInst>(BCI->getOperand(0)) && GEP.hasAllConstantIndices()) {
      // Determine how much the GEP moves the pointer. We are guaranteed to get
      // a constant back from EmitGEPOffset.
      ConstantInt *OffsetV = cast<ConstantInt>(EmitGEPOffset(&GEP));
      int64_t Offset = OffsetV->getSExtValue();

      // If this GEP instruction doesn't move the pointer, just replace the GEP
      // with a bitcast of the real input to the dest type.
      if (Offset == 0) {
        // If the bitcast is of an allocation, and the allocation will be
        // converted to match the type of the cast, don't touch this.
        if (isa<AllocaInst>(BCI->getOperand(0)) ||
            isMalloc(BCI->getOperand(0))) {
          // See if the bitcast simplifies, if so, don't nuke this GEP yet.
          if (Instruction *I = visitBitCast(*BCI)) {
            if (I != BCI) {
              I->takeName(BCI);
              BCI->getParent()->getInstList().insert(BCI, I);
              ReplaceInstUsesWith(*BCI, I);
            }
            return &GEP;
          }
        }
        return new BitCastInst(BCI->getOperand(0), GEP.getType());
      }

      // Otherwise, if the offset is non-zero, we need to find out if there is a
      // field at Offset in 'A's type. If so, we can pull the cast through the
      // GEP.
      SmallVector<Value*, 8> NewIndices;
      const Type *InTy =
        cast<PointerType>(BCI->getOperand(0)->getType())->getElementType();
      if (FindElementAtOffset(InTy, Offset, NewIndices)) {
        Value *NGEP = GEP.isInBounds() ?
          Builder->CreateInBoundsGEP(BCI->getOperand(0), NewIndices.begin(),
                                     NewIndices.end()) :
          Builder->CreateGEP(BCI->getOperand(0), NewIndices.begin(),
                             NewIndices.end());

        if (NGEP->getType() == GEP.getType())
          return ReplaceInstUsesWith(GEP, NGEP);
        NGEP->takeName(&GEP);
        return new BitCastInst(NGEP, GEP.getType());
      }
    }
  }

  return 0;
}

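// For example (illustrative IR, not from the original source; index types
// are shown before intptr_t canonicalization), chained GEPs are merged:
//
//   %p = getelementptr [12 x i8]* %a, i32 0, i32 %i
//   %q = getelementptr i8* %p, i32 %j
// becomes:
//   %sum = add i32 %i, %j
//   %q = getelementptr [12 x i8]* %a, i32 0, i32 %sum
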
Instruction *InstCombiner::visitFree(Instruction &FI) {
  Value *Op = FI.getOperand(0);

  // free undef -> unreachable.
  if (isa<UndefValue>(Op)) {
    // Insert a new store to null because we cannot modify the CFG here.
    new StoreInst(ConstantInt::getTrue(FI.getContext()),
           UndefValue::get(Type::getInt1PtrTy(FI.getContext())), &FI);
    return EraseInstFromFunction(FI);
  }

  // If we have 'free null' delete the instruction. This can happen in stl code
  // when lots of inlining happens.
  if (isa<ConstantPointerNull>(Op))
    return EraseInstFromFunction(FI);

  // If we have a malloc call whose only use is a free call, delete both.
  if (isMalloc(Op)) {
    if (CallInst* CI = extractMallocCallFromBitCast(Op)) {
      if (Op->hasOneUse() && CI->hasOneUse()) {
        EraseInstFromFunction(FI);
        EraseInstFromFunction(*CI);
        return EraseInstFromFunction(*cast<Instruction>(Op));
      }
    } else {
      // Op is a call to malloc
      if (Op->hasOneUse()) {
        EraseInstFromFunction(FI);
        return EraseInstFromFunction(*cast<Instruction>(Op));
      }
    }
  }

  return 0;
}

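// For example (illustrative IR, not from the original source), a malloc
// whose only use is the matching free is deleted outright, since the
// allocation is unobservable:
//
//   %m = call i8* @malloc(i32 16)
//   call void @free(i8* %m)
// both calls are erased.
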

Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
  // Change br (not X), label True, label False to: br X, label False, True
  Value *X = 0;
  BasicBlock *TrueDest;
  BasicBlock *FalseDest;
  if (match(&BI, m_Br(m_Not(m_Value(X)), TrueDest, FalseDest)) &&
      !isa<Constant>(X)) {
    // Swap Destinations and condition...
    BI.setCondition(X);
    BI.setSuccessor(0, FalseDest);
    BI.setSuccessor(1, TrueDest);
    return &BI;
  }

  // Canonicalize fcmp_one -> fcmp_oeq
  FCmpInst::Predicate FPred; Value *Y;
  if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)),
                      TrueDest, FalseDest)) &&
      BI.getCondition()->hasOneUse())
    if (FPred == FCmpInst::FCMP_ONE || FPred == FCmpInst::FCMP_OLE ||
        FPred == FCmpInst::FCMP_OGE) {
      FCmpInst *Cond = cast<FCmpInst>(BI.getCondition());
      Cond->setPredicate(FCmpInst::getInversePredicate(FPred));

      // Swap Destinations and condition.
      BI.setSuccessor(0, FalseDest);
      BI.setSuccessor(1, TrueDest);
      Worklist.Add(Cond);
      return &BI;
    }

  // Canonicalize icmp_ne -> icmp_eq
  ICmpInst::Predicate IPred;
  if (match(&BI, m_Br(m_ICmp(IPred, m_Value(X), m_Value(Y)),
                      TrueDest, FalseDest)) &&
      BI.getCondition()->hasOneUse())
    if (IPred == ICmpInst::ICMP_NE  || IPred == ICmpInst::ICMP_ULE ||
        IPred == ICmpInst::ICMP_SLE || IPred == ICmpInst::ICMP_UGE ||
        IPred == ICmpInst::ICMP_SGE) {
      ICmpInst *Cond = cast<ICmpInst>(BI.getCondition());
      Cond->setPredicate(ICmpInst::getInversePredicate(IPred));
      // Swap Destinations and condition.
      BI.setSuccessor(0, FalseDest);
      BI.setSuccessor(1, TrueDest);
      Worklist.Add(Cond);
      return &BI;
    }

  return 0;
}

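// For example (illustrative IR, not from the original source):
//
//   %c = icmp ne i32 %x, %y
//   br i1 %c, label %a, label %b
// becomes:
//   %c = icmp eq i32 %x, %y
//   br i1 %c, label %b, label %a
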
Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
  Value *Cond = SI.getCondition();
  if (Instruction *I = dyn_cast<Instruction>(Cond)) {
    if (I->getOpcode() == Instruction::Add)
      if (ConstantInt *AddRHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
        // change 'switch (X+4) case 1:' into 'switch (X) case -3'
        for (unsigned i = 2, e = SI.getNumOperands(); i != e; i += 2)
          SI.setOperand(i,
                   ConstantExpr::getSub(cast<Constant>(SI.getOperand(i)),
                                        AddRHS));
        SI.setOperand(0, I->getOperand(0));
        Worklist.Add(I);
        return &SI;
      }
  }
  return 0;
}

Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
  Value *Agg = EV.getAggregateOperand();

  if (!EV.hasIndices())
    return ReplaceInstUsesWith(EV, Agg);

  if (Constant *C = dyn_cast<Constant>(Agg)) {
    if (isa<UndefValue>(C))
      return ReplaceInstUsesWith(EV, UndefValue::get(EV.getType()));

    if (isa<ConstantAggregateZero>(C))
      return ReplaceInstUsesWith(EV, Constant::getNullValue(EV.getType()));

    if (isa<ConstantArray>(C) || isa<ConstantStruct>(C)) {
      // Extract the element indexed by the first index out of the constant
      Value *V = C->getOperand(*EV.idx_begin());
      if (EV.getNumIndices() > 1)
        // Extract the remaining indices out of the constant indexed by the
        // first index
        return ExtractValueInst::Create(V, EV.idx_begin() + 1, EV.idx_end());
      else
        return ReplaceInstUsesWith(EV, V);
    }
    return 0; // Can't handle other constants
  }
  if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
    // We're extracting from an insertvalue instruction, compare the indices
    const unsigned *exti, *exte, *insi, *inse;
    for (exti = EV.idx_begin(), insi = IV->idx_begin(),
         exte = EV.idx_end(), inse = IV->idx_end();
         exti != exte && insi != inse;
         ++exti, ++insi) {
      if (*insi != *exti)
        // The insert and extract both reference distinctly different elements.
        // This means the extract is not influenced by the insert, and we can
        // replace the aggregate operand of the extract with the aggregate
        // operand of the insert. i.e., replace
        // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
        // %E = extractvalue { i32, { i32 } } %I, 0
        // with
        // %E = extractvalue { i32, { i32 } } %A, 0
        return ExtractValueInst::Create(IV->getAggregateOperand(),
                                        EV.idx_begin(), EV.idx_end());
    }
    if (exti == exte && insi == inse)
      // Both iterators are at the end: Index lists are identical. Replace
      // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
      // %C = extractvalue { i32, { i32 } } %B, 1, 0
      // with "i32 42"
      return ReplaceInstUsesWith(EV, IV->getInsertedValueOperand());
    if (exti == exte) {
      // The extract list is a prefix of the insert list. i.e. replace
      // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
      // %E = extractvalue { i32, { i32 } } %I, 1
      // with
      // %X = extractvalue { i32, { i32 } } %A, 1
      // %E = insertvalue { i32 } %X, i32 42, 0
      // by switching the order of the insert and extract (though the
      // insertvalue should be left in, since it may have other uses).
      Value *NewEV = Builder->CreateExtractValue(IV->getAggregateOperand(),
                                                 EV.idx_begin(), EV.idx_end());
      return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
                                     insi, inse);
    }
    if (insi == inse)
      // The insert list is a prefix of the extract list
      // We can simply remove the common indices from the extract and make it
      // operate on the inserted value instead of the insertvalue result.
      // i.e., replace
      // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
      // %E = extractvalue { i32, { i32 } } %I, 1, 0
      // with
      // %E = extractvalue { i32 } { i32 42 }, 0
      return ExtractValueInst::Create(IV->getInsertedValueOperand(),
                                      exti, exte);
  }
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Agg)) {
    // We're extracting from an intrinsic, see if we're the only user, which
    // allows us to simplify multiple result intrinsics to simpler things that
    // just get one value.
    if (II->hasOneUse()) {
      // Check if we're grabbing the overflow bit or the result of a 'with
      // overflow' intrinsic. If it's the latter we can remove the intrinsic
      // and replace it with a traditional binary instruction.
      switch (II->getIntrinsicID()) {
      case Intrinsic::uadd_with_overflow:
      case Intrinsic::sadd_with_overflow:
        if (*EV.idx_begin() == 0) {  // Normal result.
          Value *LHS = II->getOperand(0), *RHS = II->getOperand(1);
          II->replaceAllUsesWith(UndefValue::get(II->getType()));
          EraseInstFromFunction(*II);
          return BinaryOperator::CreateAdd(LHS, RHS);
        }
        break;
      case Intrinsic::usub_with_overflow:
      case Intrinsic::ssub_with_overflow:
        if (*EV.idx_begin() == 0) {  // Normal result.
          Value *LHS = II->getOperand(0), *RHS = II->getOperand(1);
          II->replaceAllUsesWith(UndefValue::get(II->getType()));
          EraseInstFromFunction(*II);
          return BinaryOperator::CreateSub(LHS, RHS);
        }
        break;
      case Intrinsic::umul_with_overflow:
      case Intrinsic::smul_with_overflow:
        if (*EV.idx_begin() == 0) {  // Normal result.
          Value *LHS = II->getOperand(0), *RHS = II->getOperand(1);
          II->replaceAllUsesWith(UndefValue::get(II->getType()));
          EraseInstFromFunction(*II);
          return BinaryOperator::CreateMul(LHS, RHS);
        }
        break;
      default:
        break;
      }
    }
  }
  // Can't simplify extracts from other values. Note that nested extracts are
  // already simplified implicitly by the above (extract ( extract (insert) )
  // will be translated into extract ( insert ( extract ) ) first and then just
  // the value inserted, if appropriate).
  return 0;
}

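// For example (illustrative IR, not from the original source), when the only
// use of a 'with overflow' intrinsic is its arithmetic result:
//
//   %s = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
//   %v = extractvalue { i32, i1 } %s, 0
// becomes:
//   %v = add i32 %a, %b
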



/// TryToSinkInstruction - Try to move the specified instruction from its
/// current block into the beginning of DestBlock, which can only happen if it's
/// safe to move the instruction past all of the instructions between it and the
/// end of its block.
static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
  assert(I->hasOneUse() && "Invariants didn't hold!");

  // Cannot move control-flow-involving, volatile loads, vaarg, etc.
  if (isa<PHINode>(I) || I->mayHaveSideEffects() || isa<TerminatorInst>(I))
    return false;

  // Do not sink alloca instructions out of the entry block.
  if (isa<AllocaInst>(I) && I->getParent() ==
        &DestBlock->getParent()->getEntryBlock())
    return false;

  // We can only sink load instructions if there is nothing between the load and
  // the end of block that could change the value.
  if (I->mayReadFromMemory()) {
    for (BasicBlock::iterator Scan = I, E = I->getParent()->end();
         Scan != E; ++Scan)
      if (Scan->mayWriteToMemory())
        return false;
  }

  BasicBlock::iterator InsertPos = DestBlock->getFirstNonPHI();

  I->moveBefore(InsertPos);
  ++NumSunkInst;
  return true;
}

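// For example (illustrative IR, not from the original source), a load whose
// single use is in an immediate successor that has this block as its only
// predecessor, with no intervening stores:
//
//   entry:
//     %v = load i32* %p
//     br i1 %c, label %use, label %other
//   use:                                   ; preds = %entry
//     %r = add i32 %v, 1
//
// can be sunk into %use, so it only executes on the path that needs it.
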

/// AddReachableCodeToWorklist - Walk the function in depth-first order, adding
/// all reachable code to the worklist.
///
/// This has a couple of tricks to make the code faster and more powerful. In
/// particular, we constant fold and DCE instructions as we go, to avoid adding
/// them to the worklist (this significantly speeds up instcombine on code where
/// many instructions are dead or constant). Additionally, if we find a branch
/// whose condition is a known constant, we only visit the reachable successors.
///
static bool AddReachableCodeToWorklist(BasicBlock *BB,
                                       SmallPtrSet<BasicBlock*, 64> &Visited,
                                       InstCombiner &IC,
                                       const TargetData *TD) {
  bool MadeIRChange = false;
  SmallVector<BasicBlock*, 256> Worklist;
  Worklist.push_back(BB);

  std::vector<Instruction*> InstrsForInstCombineWorklist;
  InstrsForInstCombineWorklist.reserve(128);

  SmallPtrSet<ConstantExpr*, 64> FoldedConstants;

  do {
    BB = Worklist.pop_back_val();

    // We have now visited this block!  If we've already been here, ignore it.
    if (!Visited.insert(BB)) continue;

    for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
      Instruction *Inst = BBI++;

      // DCE instruction if trivially dead.
      if (isInstructionTriviallyDead(Inst)) {
        ++NumDeadInst;
        DEBUG(errs() << "IC: DCE: " << *Inst << '\n');
        Inst->eraseFromParent();
        continue;
      }

      // ConstantProp instruction if trivially constant.
      if (!Inst->use_empty() && isa<Constant>(Inst->getOperand(0)))
        if (Constant *C = ConstantFoldInstruction(Inst, TD)) {
          DEBUG(errs() << "IC: ConstFold to: " << *C << " from: "
                       << *Inst << '\n');
          Inst->replaceAllUsesWith(C);
          ++NumConstProp;
          Inst->eraseFromParent();
          continue;
        }

      if (TD) {
        // See if we can constant fold its operands.
        for (User::op_iterator i = Inst->op_begin(), e = Inst->op_end();
             i != e; ++i) {
          ConstantExpr *CE = dyn_cast<ConstantExpr>(i);
          if (CE == 0) continue;

          // If we already folded this constant, don't try again.
          if (!FoldedConstants.insert(CE))
            continue;

          Constant *NewC = ConstantFoldConstantExpression(CE, TD);
          if (NewC && NewC != CE) {
            *i = NewC;
            MadeIRChange = true;
          }
        }
      }

      InstrsForInstCombineWorklist.push_back(Inst);
    }

    // Recursively visit successors. If this is a branch or switch on a
    // constant, only visit the reachable successor.
    TerminatorInst *TI = BB->getTerminator();
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) {
        bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue();
        BasicBlock *ReachableBB = BI->getSuccessor(!CondVal);
        Worklist.push_back(ReachableBB);
        continue;
      }
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
      if (ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
        // See if this is an explicit destination; if so, it is the only
        // reachable successor.
        bool FoundCase = false;
        for (unsigned i = 1, e = SI->getNumSuccessors(); i != e; ++i)
          if (SI->getCaseValue(i) == Cond) {
            Worklist.push_back(SI->getSuccessor(i));
            FoundCase = true;
            break;
          }

        // Otherwise it is the default destination.
        if (!FoundCase)
          Worklist.push_back(SI->getSuccessor(0));
        continue;
      }
    }

    for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
      Worklist.push_back(TI->getSuccessor(i));
  } while (!Worklist.empty());

  // Once we've found all of the instructions to add to instcombine's worklist,
  // add them in reverse order. This way instcombine will visit from the top
  // of the function down. This jives well with the way that it adds all uses
  // of instructions to the worklist after doing a transformation, thus avoiding
  // some N^2 behavior in pathological cases.
  IC.Worklist.AddInitialGroup(&InstrsForInstCombineWorklist[0],
                              InstrsForInstCombineWorklist.size());

  return MadeIRChange;
}

bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
  MadeIRChange = false;

  DEBUG(errs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
        << F.getNameStr() << "\n");

  {
    // Do a depth-first traversal of the function, populate the worklist with
    // the reachable instructions. Ignore blocks that are not reachable. Keep
    // track of which blocks we visit.
    SmallPtrSet<BasicBlock*, 64> Visited;
    MadeIRChange |= AddReachableCodeToWorklist(F.begin(), Visited, *this, TD);

    // Do a quick scan over the function. If we find any blocks that are
    // unreachable, remove any instructions inside of them. This prevents
    // the instcombine code from having to deal with some bad special cases.
    for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
      if (!Visited.count(BB)) {
        Instruction *Term = BB->getTerminator();
        while (Term != BB->begin()) {   // Remove instrs bottom-up
          BasicBlock::iterator I = Term; --I;

          DEBUG(errs() << "IC: DCE: " << *I << '\n');
          // A debug intrinsic shouldn't force another iteration if we weren't
          // going to do one without it.
          if (!isa<DbgInfoIntrinsic>(I)) {
            ++NumDeadInst;
            MadeIRChange = true;
          }

          // If I is not void type then replaceAllUsesWith undef.
          // This allows ValueHandlers and custom metadata to adjust themselves.
          if (!I->getType()->isVoidTy())
            I->replaceAllUsesWith(UndefValue::get(I->getType()));
          I->eraseFromParent();
        }
      }
  }

  while (!Worklist.isEmpty()) {
    Instruction *I = Worklist.RemoveOne();
    if (I == 0) continue;  // skip null values.

    // Check to see if we can DCE the instruction.
    if (isInstructionTriviallyDead(I)) {
      DEBUG(errs() << "IC: DCE: " << *I << '\n');
      EraseInstFromFunction(*I);
      ++NumDeadInst;
      MadeIRChange = true;
      continue;
    }

    // Instruction isn't dead, see if we can constant propagate it.
    if (!I->use_empty() && isa<Constant>(I->getOperand(0)))
      if (Constant *C = ConstantFoldInstruction(I, TD)) {
        DEBUG(errs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');

        // Add operands to the worklist.
        ReplaceInstUsesWith(*I, C);
        ++NumConstProp;
        EraseInstFromFunction(*I);
        MadeIRChange = true;
        continue;
      }

    // See if we can trivially sink this instruction to a successor basic block.
    if (I->hasOneUse()) {
      BasicBlock *BB = I->getParent();
      Instruction *UserInst = cast<Instruction>(I->use_back());
      BasicBlock *UserParent;

      // Get the block the use occurs in.
      if (PHINode *PN = dyn_cast<PHINode>(UserInst))
        UserParent = PN->getIncomingBlock(I->use_begin().getUse());
      else
        UserParent = UserInst->getParent();

      if (UserParent != BB) {
        bool UserIsSuccessor = false;
        // See if the user is one of our successors.
        for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI)
          if (*SI == UserParent) {
            UserIsSuccessor = true;
            break;
          }

        // If the user is one of our immediate successors, and if that successor
        // only has us as a predecessor (we'd have to split the critical edge
        // otherwise), we can keep going.
        if (UserIsSuccessor && UserParent->getSinglePredecessor())
          // Okay, the CFG is simple enough, try to sink this instruction.
          MadeIRChange |= TryToSinkInstruction(I, UserParent);
      }
    }

    // Now that we have an instruction, try combining it to simplify it.
    Builder->SetInsertPoint(I->getParent(), I);

#ifndef NDEBUG
    std::string OrigI;
#endif
    DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str(););
    DEBUG(errs() << "IC: Visiting: " << OrigI << '\n');

    if (Instruction *Result = visit(*I)) {
      ++NumCombined;
      // Should we replace the old instruction with a new one?
      if (Result != I) {
        DEBUG(errs() << "IC: Old = " << *I << '\n'
                     << "    New = " << *Result << '\n');

        // Everything uses the new instruction now.
        I->replaceAllUsesWith(Result);

        // Push the new instruction and any users onto the worklist.
        Worklist.Add(Result);
        Worklist.AddUsersToWorkList(*Result);

        // Move the name to the new instruction first.
        Result->takeName(I);

        // Insert the new instruction into the basic block...
        BasicBlock *InstParent = I->getParent();
        BasicBlock::iterator InsertPos = I;

        if (!isa<PHINode>(Result))        // If combining a PHI, don't insert
          while (isa<PHINode>(InsertPos)) // middle of a block of PHIs.
            ++InsertPos;

        InstParent->getInstList().insert(InsertPos, Result);

        EraseInstFromFunction(*I);
      } else {
#ifndef NDEBUG
        DEBUG(errs() << "IC: Mod = " << OrigI << '\n'
                     << "    New = " << *I << '\n');
#endif

        // If the instruction was modified, it's possible that it is now dead.
        // If so, remove it.
        if (isInstructionTriviallyDead(I)) {
          EraseInstFromFunction(*I);
        } else {
          Worklist.Add(I);
          Worklist.AddUsersToWorkList(*I);
        }
      }
      MadeIRChange = true;
    }
  }

  Worklist.Zap();
  return MadeIRChange;
}


bool InstCombiner::runOnFunction(Function &F) {
  MustPreserveLCSSA = mustPreserveAnalysisID(LCSSAID);
  TD = getAnalysisIfAvailable<TargetData>();


  /// Builder - This is an IRBuilder that automatically inserts new
  /// instructions into the worklist when they are created.
  IRBuilder<true, TargetFolder, InstCombineIRInserter>
    TheBuilder(F.getContext(), TargetFolder(TD),
               InstCombineIRInserter(Worklist));
  Builder = &TheBuilder;

  bool EverMadeChange = false;

  // Iterate while there is work to do.
  unsigned Iteration = 0;
  while (DoOneIteration(F, Iteration++))
    EverMadeChange = true;

  Builder = 0;
  return EverMadeChange;
}

FunctionPass *llvm::createInstructionCombiningPass() {
  return new InstCombiner();
}
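
// A minimal usage sketch (illustrative, not part of this file; the exact
// PassManager API varies across LLVM versions of this era):
//
//   FunctionPassManager FPM(TheModule);        // TheModule: assumed Module*
//   FPM.add(createInstructionCombiningPass());
//   FPM.run(F);                                // F: the function to simplify
//
// or, from the command line:
//
//   opt -instcombine input.bc -o output.bc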