InstructionCombining.cpp revision a8d1393093d67091d47fa87a4ce86c0adcead6a0
//===- InstructionCombining.cpp - Combine multiple instructions ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// InstructionCombining - Combine instructions to form fewer, simple
// instructions.  This pass does not modify the CFG.  This pass is where
// algebraic simplification happens.
//
// This pass combines things like:
//    %Y = add i32 %X, 1
//    %Z = add i32 %Y, 1
// into:
//    %Z = add i32 %X, 2
//
// This is a simple worklist driven algorithm.
//
// This pass guarantees that the following canonicalizations are performed on
// the program:
//    1. If a binary operator has a constant operand, it is moved to the RHS
//    2. Bitwise operators with constant operands are always grouped so that
//       shifts are performed first, then or's, then and's, then xor's.
//    3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
//    4. All cmp instructions on boolean values are replaced with logical ops
//    5. add X, X is represented as (X*2) => (X << 1)
//    6. Multiplies with a power-of-two constant argument are transformed into
//       shifts.
//   ... etc.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Scalar.h"
#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm-c/Initialization.h"
#include <algorithm>
#include <climits>
using namespace llvm;
using namespace llvm::PatternMatch;

STATISTIC(NumCombined , "Number of insts combined");
STATISTIC(NumConstProp, "Number of constant folds");
STATISTIC(NumDeadInst , "Number of dead inst eliminated");
STATISTIC(NumSunkInst , "Number of instructions sunk");
STATISTIC(NumExpand,    "Number of expansions");
STATISTIC(NumFactor ,   "Number of factorizations");
STATISTIC(NumReassoc ,  "Number of reassociations");

// Initialization Routines
void llvm::initializeInstCombine(PassRegistry &Registry) {
  initializeInstCombinerPass(Registry);
}

void LLVMInitializeInstCombine(LLVMPassRegistryRef R) {
  initializeInstCombine(*unwrap(R));
}

char InstCombiner::ID = 0;
INITIALIZE_PASS(InstCombiner, "instcombine",
                "Combine redundant instructions", false, false)

void InstCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
}


/// ShouldChangeType - Return true if it is desirable to convert a computation
/// from 'From' to 'To'.  We don't want to convert from a legal to an illegal
/// type for example, or from a smaller to a larger illegal type.
bool InstCombiner::ShouldChangeType(Type *From, Type *To) const {
  assert(From->isIntegerTy() && To->isIntegerTy());

  // If we don't have TD, we don't know if the source/dest are legal.
  if (!TD) return false;

  unsigned FromWidth = From->getPrimitiveSizeInBits();
  unsigned ToWidth = To->getPrimitiveSizeInBits();
  bool FromLegal = TD->isLegalInteger(FromWidth);
  bool ToLegal = TD->isLegalInteger(ToWidth);

  // If this is a legal integer from type, and the result would be an illegal
  // type, don't do the transformation.
  if (FromLegal && !ToLegal)
    return false;

  // Otherwise, if both are illegal, do not increase the size of the result. We
  // do allow things like i160 -> i64, but not i64 -> i160.
  if (!FromLegal && !ToLegal && ToWidth > FromWidth)
    return false;

  return true;
}
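
// For example, on a target whose legal integer widths are i32 and i64:
// converting i160 -> i64 is desirable (illegal type to legal type), while
// i64 -> i160 is rejected (legal type to illegal type), and i160 -> i256 is
// rejected because both types are illegal and the result would grow.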

// Return true if No Signed Wrap should be maintained for I.
// The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
// where both B and C should be ConstantInts, results in a constant that does
// not overflow. This function only handles the Add and Sub opcodes. For
// all other opcodes, the function conservatively returns false.
static bool MaintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) {
  OverflowingBinaryOperator *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  if (!OBO || !OBO->hasNoSignedWrap()) {
    return false;
  }

  // We reason about Add and Sub only.
  Instruction::BinaryOps Opcode = I.getOpcode();
  if (Opcode != Instruction::Add &&
      Opcode != Instruction::Sub) {
    return false;
  }

  ConstantInt *CB = dyn_cast<ConstantInt>(B);
  ConstantInt *CC = dyn_cast<ConstantInt>(C);

  if (!CB || !CC) {
    return false;
  }

  const APInt &BVal = CB->getValue();
  const APInt &CVal = CC->getValue();
  bool Overflow = false;

  if (Opcode == Instruction::Add) {
    BVal.sadd_ov(CVal, Overflow);
  } else {
    BVal.ssub_ov(CVal, Overflow);
  }

  return !Overflow;
}
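
// For example (using i8 for brevity): reassociating "(X +nsw 3) +nsw 5" into
// "X +nsw 8" may keep the nsw flag, since 3 + 5 does not overflow; but for
// "(X +nsw 100) +nsw 100" the combined constant 200 overflows i8, so this
// returns false and the flag must be dropped.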

/// SimplifyAssociativeOrCommutative - This performs a few simplifications for
/// operators which are associative or commutative:
//
//  Commutative operators:
//
//  1. Order operands such that they are listed from right (least complex) to
//     left (most complex).  This puts constants before unary operators before
//     binary operators.
//
//  Associative operators:
//
//  2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
//  3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
//
//  Associative and commutative operators:
//
//  4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
//  5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
//  6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
//     if C1 and C2 are constants.
//
bool InstCombiner::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
  Instruction::BinaryOps Opcode = I.getOpcode();
  bool Changed = false;

  do {
    // Order operands such that they are listed from right (least complex) to
    // left (most complex).  This puts constants before unary operators before
    // binary operators.
    if (I.isCommutative() && getComplexity(I.getOperand(0)) <
        getComplexity(I.getOperand(1)))
      Changed = !I.swapOperands();

    BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
    BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));

    if (I.isAssociative()) {
      // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
      if (Op0 && Op0->getOpcode() == Opcode) {
        Value *A = Op0->getOperand(0);
        Value *B = Op0->getOperand(1);
        Value *C = I.getOperand(1);

        // Does "B op C" simplify?
        if (Value *V = SimplifyBinOp(Opcode, B, C, TD)) {
          // It simplifies to V.  Form "A op V".
          I.setOperand(0, A);
          I.setOperand(1, V);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          if (MaintainNoSignedWrap(I, B, C) &&
              (!Op0 || (isa<BinaryOperator>(Op0) && Op0->hasNoSignedWrap()))) {
            // Note: this is only valid because SimplifyBinOp doesn't look at
            // the operands to Op0.
            I.clearSubclassOptionalData();
            I.setHasNoSignedWrap(true);
          } else {
            I.clearSubclassOptionalData();
          }

          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
      if (Op1 && Op1->getOpcode() == Opcode) {
        Value *A = I.getOperand(0);
        Value *B = Op1->getOperand(0);
        Value *C = Op1->getOperand(1);

        // Does "A op B" simplify?
        if (Value *V = SimplifyBinOp(Opcode, A, B, TD)) {
          // It simplifies to V.  Form "V op C".
          I.setOperand(0, V);
          I.setOperand(1, C);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          I.clearSubclassOptionalData();
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }
    }

    if (I.isAssociative() && I.isCommutative()) {
      // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
      if (Op0 && Op0->getOpcode() == Opcode) {
        Value *A = Op0->getOperand(0);
        Value *B = Op0->getOperand(1);
        Value *C = I.getOperand(1);

        // Does "C op A" simplify?
        if (Value *V = SimplifyBinOp(Opcode, C, A, TD)) {
          // It simplifies to V.  Form "V op B".
          I.setOperand(0, V);
          I.setOperand(1, B);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          I.clearSubclassOptionalData();
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
      if (Op1 && Op1->getOpcode() == Opcode) {
        Value *A = I.getOperand(0);
        Value *B = Op1->getOperand(0);
        Value *C = Op1->getOperand(1);

        // Does "C op A" simplify?
        if (Value *V = SimplifyBinOp(Opcode, C, A, TD)) {
          // It simplifies to V.  Form "B op V".
          I.setOperand(0, B);
          I.setOperand(1, V);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          I.clearSubclassOptionalData();
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
      // if C1 and C2 are constants.
      if (Op0 && Op1 &&
          Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
          isa<Constant>(Op0->getOperand(1)) &&
          isa<Constant>(Op1->getOperand(1)) &&
          Op0->hasOneUse() && Op1->hasOneUse()) {
        Value *A = Op0->getOperand(0);
        Constant *C1 = cast<Constant>(Op0->getOperand(1));
        Value *B = Op1->getOperand(0);
        Constant *C2 = cast<Constant>(Op1->getOperand(1));

        Constant *Folded = ConstantExpr::get(Opcode, C1, C2);
        BinaryOperator *New = BinaryOperator::Create(Opcode, A, B);
        InsertNewInstWith(New, I);
        New->takeName(Op1);
        I.setOperand(0, New);
        I.setOperand(1, Folded);
        // Conservatively clear the optional flags, since they may not be
        // preserved by the reassociation.
        I.clearSubclassOptionalData();

        Changed = true;
        continue;
      }
    }

    // No further simplifications.
    return Changed;
  } while (1);
}
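
// For example, transform 6 turns "(X * 3) * (Y * 5)" into "(X * Y) * 15":
//   %a = mul i32 %X, 3
//   %b = mul i32 %Y, 5
//   %r = mul i32 %a, %b
// becomes
//   %t = mul i32 %X, %Y
//   %r = mul i32 %t, 15
// which exposes the combined constant to further folding.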

/// LeftDistributesOverRight - Whether "X LOp (Y ROp Z)" is always equal to
/// "(X LOp Y) ROp (X LOp Z)".
static bool LeftDistributesOverRight(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  switch (LOp) {
  default:
    return false;

  case Instruction::And:
    // And distributes over Or and Xor.
    switch (ROp) {
    default:
      return false;
    case Instruction::Or:
    case Instruction::Xor:
      return true;
    }

  case Instruction::Mul:
    // Multiplication distributes over addition and subtraction.
    switch (ROp) {
    default:
      return false;
    case Instruction::Add:
    case Instruction::Sub:
      return true;
    }

  case Instruction::Or:
    // Or distributes over And.
    switch (ROp) {
    default:
      return false;
    case Instruction::And:
      return true;
    }
  }
}

/// RightDistributesOverLeft - Whether "(X LOp Y) ROp Z" is always equal to
/// "(X ROp Z) LOp (Y ROp Z)".
static bool RightDistributesOverLeft(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  if (Instruction::isCommutative(ROp))
    return LeftDistributesOverRight(ROp, LOp);
  // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
  // but this requires knowing that the addition does not overflow and other
  // such subtleties.
  return false;
}
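
// For example, LeftDistributesOverRight(And, Or) is true because
// "X & (Y | Z)" always equals "(X & Y) | (X & Z)"; and since Or is
// commutative, RightDistributesOverLeft(And, Or) holds as well:
// "(X & Y) | Z" equals "(X | Z) & (Y | Z)".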

/// SimplifyUsingDistributiveLaws - This tries to simplify binary operations
/// which some other binary operation distributes over either by factorizing
/// out common terms (eg "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this
/// results in simplifications (eg: "A & (B | C) -> (A&B) | (A&C)" if this is
/// a win).  Returns the simplified value, or null if it didn't simplify.
Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode(); // op

  // Factorization.
  if (Op0 && Op1 && Op0->getOpcode() == Op1->getOpcode()) {
    // The instruction has the form "(A op' B) op (C op' D)".  Try to factorize
    // a common term.
    Value *A = Op0->getOperand(0), *B = Op0->getOperand(1);
    Value *C = Op1->getOperand(0), *D = Op1->getOperand(1);
    Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'

    // Does "X op' Y" always equal "Y op' X"?
    bool InnerCommutative = Instruction::isCommutative(InnerOpcode);

    // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
    if (LeftDistributesOverRight(InnerOpcode, TopLevelOpcode))
      // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
      // commutative case, "(A op' B) op (C op' A)"?
      if (A == C || (InnerCommutative && A == D)) {
        if (A != C)
          std::swap(C, D);
        // Consider forming "A op' (B op D)".
        // If "B op D" simplifies then it can be formed with no cost.
        Value *V = SimplifyBinOp(TopLevelOpcode, B, D, TD);
        // If "B op D" doesn't simplify then only go on if both of the existing
        // operations "A op' B" and "C op' D" will be zapped as no longer used.
        if (!V && Op0->hasOneUse() && Op1->hasOneUse())
          V = Builder->CreateBinOp(TopLevelOpcode, B, D, Op1->getName());
        if (V) {
          ++NumFactor;
          V = Builder->CreateBinOp(InnerOpcode, A, V);
          V->takeName(&I);
          return V;
        }
      }

    // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
    if (RightDistributesOverLeft(TopLevelOpcode, InnerOpcode))
      // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
      // commutative case, "(A op' B) op (B op' D)"?
      if (B == D || (InnerCommutative && B == C)) {
        if (B != D)
          std::swap(C, D);
        // Consider forming "(A op C) op' B".
        // If "A op C" simplifies then it can be formed with no cost.
        Value *V = SimplifyBinOp(TopLevelOpcode, A, C, TD);
        // If "A op C" doesn't simplify then only go on if both of the existing
        // operations "A op' B" and "C op' D" will be zapped as no longer used.
        if (!V && Op0->hasOneUse() && Op1->hasOneUse())
          V = Builder->CreateBinOp(TopLevelOpcode, A, C, Op0->getName());
        if (V) {
          ++NumFactor;
          V = Builder->CreateBinOp(InnerOpcode, V, B);
          V->takeName(&I);
          return V;
        }
      }
  }

  // Expansion.
  if (Op0 && RightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
    // The instruction has the form "(A op' B) op C".  See if expanding it out
    // to "(A op C) op' (B op C)" results in simplifications.
    Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
    Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'

    // Do "A op C" and "B op C" both simplify?
    if (Value *L = SimplifyBinOp(TopLevelOpcode, A, C, TD))
      if (Value *R = SimplifyBinOp(TopLevelOpcode, B, C, TD)) {
        // They do!  Return "L op' R".
        ++NumExpand;
        // If "L op' R" equals "A op' B" then "L op' R" is just the LHS.
        if ((L == A && R == B) ||
            (Instruction::isCommutative(InnerOpcode) && L == B && R == A))
          return Op0;
        // Otherwise return "L op' R" if it simplifies.
        if (Value *V = SimplifyBinOp(InnerOpcode, L, R, TD))
          return V;
        // Otherwise, create a new instruction.
        C = Builder->CreateBinOp(InnerOpcode, L, R);
        C->takeName(&I);
        return C;
      }
  }

  if (Op1 && LeftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
    // The instruction has the form "A op (B op' C)".  See if expanding it out
    // to "(A op B) op' (A op C)" results in simplifications.
    Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
    Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'

    // Do "A op B" and "A op C" both simplify?
    if (Value *L = SimplifyBinOp(TopLevelOpcode, A, B, TD))
      if (Value *R = SimplifyBinOp(TopLevelOpcode, A, C, TD)) {
        // They do!  Return "L op' R".
        ++NumExpand;
        // If "L op' R" equals "B op' C" then "L op' R" is just the RHS.
        if ((L == B && R == C) ||
            (Instruction::isCommutative(InnerOpcode) && L == C && R == B))
          return Op1;
        // Otherwise return "L op' R" if it simplifies.
        if (Value *V = SimplifyBinOp(InnerOpcode, L, R, TD))
          return V;
        // Otherwise, create a new instruction.
        A = Builder->CreateBinOp(InnerOpcode, L, R);
        A->takeName(&I);
        return A;
      }
  }

  return 0;
}
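
// For example, factorization turns "(X * 3) + (X * 5)" into "X * 8": the
// common term X is pulled out and "3 + 5" folds to a constant, so no extra
// instruction is needed.  When the inner "B op D" does not simplify, the
// factorization is only performed if both original multiplies would become
// dead, per the hasOneUse() checks above.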

// dyn_castNegVal - Given a 'sub' instruction, return the RHS of the
// instruction if the LHS is a constant zero (which is the 'negate' form).
//
Value *InstCombiner::dyn_castNegVal(Value *V) const {
  if (BinaryOperator::isNeg(V))
    return BinaryOperator::getNegArgument(V);

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantInt *C = dyn_cast<ConstantInt>(V))
    return ConstantExpr::getNeg(C);

  if (ConstantVector *C = dyn_cast<ConstantVector>(V))
    if (C->getType()->getElementType()->isIntegerTy())
      return ConstantExpr::getNeg(C);

  return 0;
}

// dyn_castFNegVal - Given an 'fsub' instruction, return the RHS of the
// instruction if the LHS is a constant negative zero (which is the 'negate'
// form).
//
Value *InstCombiner::dyn_castFNegVal(Value *V) const {
  if (BinaryOperator::isFNeg(V))
    return BinaryOperator::getFNegArgument(V);

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantFP *C = dyn_cast<ConstantFP>(V))
    return ConstantExpr::getFNeg(C);

  if (ConstantVector *C = dyn_cast<ConstantVector>(V))
    if (C->getType()->getElementType()->isFloatingPointTy())
      return ConstantExpr::getFNeg(C);

  return 0;
}

static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,
                                             InstCombiner *IC) {
  if (CastInst *CI = dyn_cast<CastInst>(&I)) {
    return IC->Builder->CreateCast(CI->getOpcode(), SO, I.getType());
  }

  // Figure out if the constant is the left or the right argument.
  bool ConstIsRHS = isa<Constant>(I.getOperand(1));
  Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS));

  if (Constant *SOC = dyn_cast<Constant>(SO)) {
    if (ConstIsRHS)
      return ConstantExpr::get(I.getOpcode(), SOC, ConstOperand);
    return ConstantExpr::get(I.getOpcode(), ConstOperand, SOC);
  }

  Value *Op0 = SO, *Op1 = ConstOperand;
  if (!ConstIsRHS)
    std::swap(Op0, Op1);

  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
    return IC->Builder->CreateBinOp(BO->getOpcode(), Op0, Op1,
                                    SO->getName()+".op");
  if (ICmpInst *CI = dyn_cast<ICmpInst>(&I))
    return IC->Builder->CreateICmp(CI->getPredicate(), Op0, Op1,
                                   SO->getName()+".cmp");
  if (FCmpInst *CI = dyn_cast<FCmpInst>(&I))
    return IC->Builder->CreateFCmp(CI->getPredicate(), Op0, Op1,
                                   SO->getName()+".cmp");
  llvm_unreachable("Unknown binary instruction type!");
}

// FoldOpIntoSelect - Given an instruction with a select as one operand and a
// constant as the other operand, try to fold the binary operator into the
// select arguments.  This also works for Cast instructions, which obviously do
// not have a second operand.
Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
  // Don't modify shared select instructions.
  if (!SI->hasOneUse()) return 0;
  Value *TV = SI->getOperand(1);
  Value *FV = SI->getOperand(2);

  if (isa<Constant>(TV) || isa<Constant>(FV)) {
    // Bool selects with constant operands can be folded to logical ops.
    if (SI->getType()->isIntegerTy(1)) return 0;

    // If it's a bitcast involving vectors, make sure it has the same number of
    // elements on both sides.
    if (BitCastInst *BC = dyn_cast<BitCastInst>(&Op)) {
      VectorType *DestTy = dyn_cast<VectorType>(BC->getDestTy());
      VectorType *SrcTy = dyn_cast<VectorType>(BC->getSrcTy());

      // Verify that either both or neither are vectors.
      if ((SrcTy == NULL) != (DestTy == NULL)) return 0;
      // If vectors, verify that they have the same number of elements.
      if (SrcTy && SrcTy->getNumElements() != DestTy->getNumElements())
        return 0;
    }

    Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, this);
    Value *SelectFalseVal = FoldOperationIntoSelectOperand(Op, FV, this);

    return SelectInst::Create(SI->getCondition(),
                              SelectTrueVal, SelectFalseVal);
  }
  return 0;
}
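
// For example, given
//   %s = select i1 %c, i32 4, i32 %x
//   %r = add i32 %s, 16
// the add is folded into both select arms: the constant arm folds to 20 and
// the other arm gets a copy of the operation, giving
//   %x.op = add i32 %x, 16
//   %r = select i1 %c, i32 20, i32 %x.op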

/// FoldOpIntoPhi - Given a binary operator, cast instruction, or select which
/// has a PHI node as operand #0, see if we can fold the instruction into the
/// PHI (which is only possible if all operands to the PHI are constants).
///
Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
  PHINode *PN = cast<PHINode>(I.getOperand(0));
  unsigned NumPHIValues = PN->getNumIncomingValues();
  if (NumPHIValues == 0)
    return 0;

  // We normally only transform phis with a single use.  However, if a PHI has
  // multiple uses and they are all the same operation, we can fold *all* of
  // the uses into the PHI.
  if (!PN->hasOneUse()) {
    // Walk the use list for the instruction, comparing them to I.
    for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end();
         UI != E; ++UI) {
      Instruction *User = cast<Instruction>(*UI);
      if (User != &I && !I.isIdenticalTo(User))
        return 0;
    }
    // Otherwise, we can replace *all* users with the new PHI we form.
  }

  // Check to see if all of the operands of the PHI are simple constants
  // (constantint/constantfp/undef).  If there is one non-constant value,
  // remember the BB it is in.  If there is more than one or if *it* is a PHI,
  // bail out.  We don't do arbitrary constant expressions here because moving
  // their computation can be expensive without a cost model.
  BasicBlock *NonConstBB = 0;
  for (unsigned i = 0; i != NumPHIValues; ++i) {
    Value *InVal = PN->getIncomingValue(i);
    if (isa<Constant>(InVal) && !isa<ConstantExpr>(InVal))
      continue;

    if (isa<PHINode>(InVal)) return 0;  // Itself a phi.
    if (NonConstBB) return 0;  // More than one non-const value.

    NonConstBB = PN->getIncomingBlock(i);

    // If the InVal is an invoke at the end of the pred block, then we can't
    // insert a computation after it without breaking the edge.
    if (InvokeInst *II = dyn_cast<InvokeInst>(InVal))
      if (II->getParent() == NonConstBB)
        return 0;

    // If the incoming non-constant value is in I's block, we will remove one
    // instruction, but insert another equivalent one, leading to infinite
    // instcombine.
    if (NonConstBB == I.getParent())
      return 0;
  }

  // If there is exactly one non-constant value, we can insert a copy of the
  // operation in that block.  However, if this is a critical edge, we would be
  // inserting the computation on some other paths (e.g. inside a loop).  Only
  // do this if the pred block is unconditionally branching into the phi block.
  if (NonConstBB != 0) {
    BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator());
    if (!BI || !BI->isUnconditional()) return 0;
  }

  // Okay, we can do the transformation: create the new PHI node.
  PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
  InsertNewInstBefore(NewPN, *PN);
  NewPN->takeName(PN);

  // If we are going to have to insert a new computation, do so right before
  // the predecessor's terminator.
  if (NonConstBB)
    Builder->SetInsertPoint(NonConstBB->getTerminator());

  // Next, add all of the operands to the PHI.
  if (SelectInst *SI = dyn_cast<SelectInst>(&I)) {
    // We only currently try to fold the condition of a select when it is a
    // phi, not the true/false values.
    Value *TrueV = SI->getTrueValue();
    Value *FalseV = SI->getFalseValue();
    BasicBlock *PhiTransBB = PN->getParent();
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      BasicBlock *ThisBB = PN->getIncomingBlock(i);
      Value *TrueVInPred = TrueV->DoPHITranslation(PhiTransBB, ThisBB);
      Value *FalseVInPred = FalseV->DoPHITranslation(PhiTransBB, ThisBB);
      Value *InV = 0;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = InC->isNullValue() ? FalseVInPred : TrueVInPred;
      else
        InV = Builder->CreateSelect(PN->getIncomingValue(i),
                                    TrueVInPred, FalseVInPred, "phitmp");
      NewPN->addIncoming(InV, ThisBB);
    }
  } else if (CmpInst *CI = dyn_cast<CmpInst>(&I)) {
    Constant *C = cast<Constant>(I.getOperand(1));
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV = 0;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C);
      else if (isa<ICmpInst>(CI))
        InV = Builder->CreateICmp(CI->getPredicate(), PN->getIncomingValue(i),
                                  C, "phitmp");
      else
        InV = Builder->CreateFCmp(CI->getPredicate(), PN->getIncomingValue(i),
                                  C, "phitmp");
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  } else if (I.getNumOperands() == 2) {
    Constant *C = cast<Constant>(I.getOperand(1));
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV = 0;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = ConstantExpr::get(I.getOpcode(), InC, C);
      else
        InV = Builder->CreateBinOp(cast<BinaryOperator>(I).getOpcode(),
                                   PN->getIncomingValue(i), C, "phitmp");
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  } else {
    CastInst *CI = cast<CastInst>(&I);
    Type *RetTy = CI->getType();
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy);
      else
        InV = Builder->CreateCast(CI->getOpcode(),
                                  PN->getIncomingValue(i), I.getType(),
                                  "phitmp");
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  }

  for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end();
       UI != E; ) {
    Instruction *User = cast<Instruction>(*UI++);
    if (User == &I) continue;
    ReplaceInstUsesWith(*User, NewPN);
    EraseInstFromFunction(*User);
  }
  return ReplaceInstUsesWith(I, NewPN);
}
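
// For example, an add whose PHI operand has all-constant inputs is folded
// into the PHI:
//   %p = phi i32 [ 1, %bb1 ], [ 2, %bb2 ]
//   %r = add i32 %p, 10
// becomes
//   %r = phi i32 [ 11, %bb1 ], [ 12, %bb2 ]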

/// FindElementAtOffset - Given a type and a constant offset, determine whether
/// or not there is a sequence of GEP indices into the type that will land us
/// at the specified offset.  If so, fill them into NewIndices and return the
/// resultant element type, otherwise return null.
Type *InstCombiner::FindElementAtOffset(Type *Ty, int64_t Offset,
                                        SmallVectorImpl<Value*> &NewIndices) {
  if (!TD) return 0;
  if (!Ty->isSized()) return 0;

  // Start with the index over the outer type.  Note that the type size
  // might be zero (even if the offset isn't zero) if the indexed type
  // is something like [0 x {int, int}]
  Type *IntPtrTy = TD->getIntPtrType(Ty->getContext());
  int64_t FirstIdx = 0;
  if (int64_t TySize = TD->getTypeAllocSize(Ty)) {
    FirstIdx = Offset/TySize;
    Offset -= FirstIdx*TySize;

    // Handle hosts where % returns negative instead of values [0..TySize).
    if (Offset < 0) {
      --FirstIdx;
      Offset += TySize;
      assert(Offset >= 0);
    }
    assert((uint64_t)Offset < (uint64_t)TySize && "Out of range offset");
  }

  NewIndices.push_back(ConstantInt::get(IntPtrTy, FirstIdx));

  // Index into the types.  If we fail, return null.
  while (Offset) {
    // Indexing into tail padding between struct/array elements.
    if (uint64_t(Offset*8) >= TD->getTypeSizeInBits(Ty))
      return 0;

    if (StructType *STy = dyn_cast<StructType>(Ty)) {
      const StructLayout *SL = TD->getStructLayout(STy);
      assert(Offset < (int64_t)SL->getSizeInBytes() &&
             "Offset must stay within the indexed type");

      unsigned Elt = SL->getElementContainingOffset(Offset);
      NewIndices.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
                                            Elt));

      Offset -= SL->getElementOffset(Elt);
      Ty = STy->getElementType(Elt);
    } else if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
      uint64_t EltSize = TD->getTypeAllocSize(AT->getElementType());
      assert(EltSize && "Cannot index into a zero-sized array");
      NewIndices.push_back(ConstantInt::get(IntPtrTy, Offset/EltSize));
      Offset %= EltSize;
      Ty = AT->getElementType();
    } else {
      // Otherwise, we can't index into the middle of this atomic type, bail.
      return 0;
    }
  }

  return Ty;
}
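
// For example, for Ty = { i32, [4 x i16] } and Offset = 6 (assuming the usual
// padding-free layout for this struct), the indices filled in are [0, 1, 1]:
// the array starts at byte 4, and byte 6 is element 1 of the i16 array, so
// the returned element type is i16.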

static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
  // If this GEP has only 0 indices, it is the same pointer as
  // Src. If Src is not a trivial GEP too, don't combine
  // the indices.
  if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
      !Src.hasOneUse())
    return false;
  return true;
}

Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
  SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());

  if (Value *V = SimplifyGEPInst(Ops, TD))
    return ReplaceInstUsesWith(GEP, V);

  Value *PtrOp = GEP.getOperand(0);

  // Eliminate unneeded casts for indices, and replace indices which displace
  // by multiples of a zero size type with zero.
  if (TD) {
    bool MadeChange = false;
    Type *IntPtrTy = TD->getIntPtrType(GEP.getContext());

    gep_type_iterator GTI = gep_type_begin(GEP);
    for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();
         I != E; ++I, ++GTI) {
      // Skip indices into struct types.
      SequentialType *SeqTy = dyn_cast<SequentialType>(*GTI);
      if (!SeqTy) continue;

      // If the element type has zero size then any index over it is equivalent
      // to an index of zero, so replace it with zero if it is not zero
      // already.
      if (SeqTy->getElementType()->isSized() &&
          TD->getTypeAllocSize(SeqTy->getElementType()) == 0)
        if (!isa<Constant>(*I) || !cast<Constant>(*I)->isNullValue()) {
          *I = Constant::getNullValue(IntPtrTy);
          MadeChange = true;
        }

      if ((*I)->getType() != IntPtrTy) {
        // If we are using a wider index than needed for this platform, shrink
        // it to what we need.  If narrower, sign-extend it to what we need.
        // This explicit cast can make subsequent optimizations more obvious.
        *I = Builder->CreateIntCast(*I, IntPtrTy, true);
        MadeChange = true;
      }
    }
    if (MadeChange) return &GEP;
  }

  // Combine Indices - If the source pointer to this getelementptr instruction
  // is a getelementptr instruction, combine the indices of the two
  // getelementptr instructions into a single instruction.
  //
  if (GEPOperator *Src = dyn_cast<GEPOperator>(PtrOp)) {
    if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
      return 0;

    // Note that if our source is a gep chain itself that we wait for that
    // chain to be resolved before we perform this transformation.  This
    // avoids us creating a TON of code in some cases.
    if (GEPOperator *SrcGEP =
          dyn_cast<GEPOperator>(Src->getOperand(0)))
      if (SrcGEP->getNumOperands() == 2 && shouldMergeGEPs(*Src, *SrcGEP))
        return 0;   // Wait until our source is folded to completion.

    SmallVector<Value*, 8> Indices;

    // Find out whether the last index in the source GEP is a sequential idx.
    bool EndsWithSequential = false;
    for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
         I != E; ++I)
      EndsWithSequential = !(*I)->isStructTy();

    // Can we combine the two pointer arithmetic offsets?
    if (EndsWithSequential) {
      // Replace: gep (gep %P, long B), long A, ...
      // With:    T = long A+B; gep %P, T, ...
      //
      Value *Sum;
      Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
      Value *GO1 = GEP.getOperand(1);
      if (SO1 == Constant::getNullValue(SO1->getType())) {
        Sum = GO1;
      } else if (GO1 == Constant::getNullValue(GO1->getType())) {
        Sum = SO1;
      } else {
        // If they aren't the same type, then the input hasn't been processed
        // by the loop above yet (which canonicalizes sequential index types to
        // intptr_t).  Just avoid transforming this until the input has been
        // normalized.
        if (SO1->getType() != GO1->getType())
          return 0;
        Sum = Builder->CreateAdd(SO1, GO1, PtrOp->getName()+".sum");
      }

      // Update the GEP in place if possible.
      if (Src->getNumOperands() == 2) {
        GEP.setOperand(0, Src->getOperand(0));
        GEP.setOperand(1, Sum);
        return &GEP;
      }
      Indices.append(Src->op_begin()+1, Src->op_end()-1);
      Indices.push_back(Sum);
      Indices.append(GEP.op_begin()+2, GEP.op_end());
    } else if (isa<Constant>(*GEP.idx_begin()) &&
               cast<Constant>(*GEP.idx_begin())->isNullValue() &&
               Src->getNumOperands() != 1) {
      // Otherwise we can do the fold if the first index of the GEP is a zero
      Indices.append(Src->op_begin()+1, Src->op_end());
      Indices.append(GEP.idx_begin()+1, GEP.idx_end());
    }

    if (!Indices.empty())
      return (GEP.isInBounds() && Src->isInBounds()) ?
        GetElementPtrInst::CreateInBounds(Src->getOperand(0), Indices,
                                          GEP.getName()) :
        GetElementPtrInst::Create(Src->getOperand(0), Indices, GEP.getName());
  }

  // Handle gep(bitcast x) and gep(gep x, 0, 0, 0).
  Value *StrippedPtr = PtrOp->stripPointerCasts();
  PointerType *StrippedPtrTy = cast<PointerType>(StrippedPtr->getType());
  if (StrippedPtr != PtrOp &&
      StrippedPtrTy->getAddressSpace() == GEP.getPointerAddressSpace()) {

    bool HasZeroPointerIndex = false;
    if (ConstantInt *C = dyn_cast<ConstantInt>(GEP.getOperand(1)))
      HasZeroPointerIndex = C->isZero();

    // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ...
    // into     : GEP [10 x i8]* X, i32 0, ...
    //
    // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ...
    //           into     : GEP i8* X, ...
    //
    // This occurs when the program declares an array extern like "int X[];"
    if (HasZeroPointerIndex) {
      PointerType *CPTy = cast<PointerType>(PtrOp->getType());
      if (ArrayType *CATy =
            dyn_cast<ArrayType>(CPTy->getElementType())) {
        // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ?
        if (CATy->getElementType() == StrippedPtrTy->getElementType()) {
          // -> GEP i8* X, ...
          SmallVector<Value*, 8> Idx(GEP.idx_begin()+1, GEP.idx_end());
          GetElementPtrInst *Res =
            GetElementPtrInst::Create(StrippedPtr, Idx, GEP.getName());
          Res->setIsInBounds(GEP.isInBounds());
          return Res;
        }

        if (ArrayType *XATy =
              dyn_cast<ArrayType>(StrippedPtrTy->getElementType())) {
          // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ?
          if (CATy->getElementType() == XATy->getElementType()) {
            // -> GEP [10 x i8]* X, i32 0, ...
            // At this point, we know that the cast source type is a pointer
            // to an array of the same type as the destination pointer
            // array.  Because the array type is never stepped over (there
            // is a leading zero) we can fold the cast into this GEP.
            GEP.setOperand(0, StrippedPtr);
            return &GEP;
          }
        }
      }
    } else if (GEP.getNumOperands() == 2) {
      // Transform things like:
      // %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V
      // into:  %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
      Type *SrcElTy = StrippedPtrTy->getElementType();
      Type *ResElTy = cast<PointerType>(PtrOp->getType())->getElementType();
      if (TD && SrcElTy->isArrayTy() &&
          TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
          TD->getTypeAllocSize(ResElTy)) {
        Value *Idx[2];
        Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP.getContext()));
        Idx[1] = GEP.getOperand(1);
        Value *NewGEP = GEP.isInBounds() ?
          Builder->CreateInBoundsGEP(StrippedPtr, Idx, GEP.getName()) :
          Builder->CreateGEP(StrippedPtr, Idx, GEP.getName());
        // V and GEP are both pointer types --> BitCast
        return new BitCastInst(NewGEP, GEP.getType());
      }

      // Transform things like:
      // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
      //   (where tmp = 8*tmp2) into:
      // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast

      if (TD && SrcElTy->isArrayTy() && ResElTy->isIntegerTy(8)) {
        uint64_t ArrayEltSize =
          TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType());

        // Check to see if "tmp" is a scale by a multiple of ArrayEltSize.  We
        // allow either a mul, shift, or constant here.
        Value *NewIdx = 0;
        ConstantInt *Scale = 0;
        if (ArrayEltSize == 1) {
          NewIdx = GEP.getOperand(1);
          Scale = ConstantInt::get(cast<IntegerType>(NewIdx->getType()), 1);
        } else if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP.getOperand(1))) {
          NewIdx = ConstantInt::get(CI->getType(), 1);
          Scale = CI;
        } else if (Instruction *Inst = dyn_cast<Instruction>(GEP.getOperand(1))) {
          if (Inst->getOpcode() == Instruction::Shl &&
              isa<ConstantInt>(Inst->getOperand(1))) {
            ConstantInt *ShAmt = cast<ConstantInt>(Inst->getOperand(1));
            uint32_t ShAmtVal = ShAmt->getLimitedValue(64);
            Scale = ConstantInt::get(cast<IntegerType>(Inst->getType()),
                                     1ULL << ShAmtVal);
            NewIdx = Inst->getOperand(0);
          } else if (Inst->getOpcode() == Instruction::Mul &&
                     isa<ConstantInt>(Inst->getOperand(1))) {
            Scale = cast<ConstantInt>(Inst->getOperand(1));
            NewIdx = Inst->getOperand(0);
          }
        }

        // If the index will be to exactly the right offset with the scale
        // taken out, perform the transformation. Note, we don't know whether
        // Scale is signed or not. We'll use unsigned version of division/modulo
        // operation after making sure Scale doesn't have the sign bit set.
        if (ArrayEltSize && Scale && Scale->getSExtValue() >= 0LL &&
            Scale->getZExtValue() % ArrayEltSize == 0) {
          Scale = ConstantInt::get(Scale->getType(),
                                   Scale->getZExtValue() / ArrayEltSize);
          if (Scale->getZExtValue() != 1) {
            Constant *C = ConstantExpr::getIntegerCast(Scale, NewIdx->getType(),
                                                       false /*ZExt*/);
            NewIdx = Builder->CreateMul(NewIdx, C, "idxscale");
          }

          // Insert the new GEP instruction.
          Value *Idx[2];
          Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP.getContext()));
          Idx[1] = NewIdx;
          Value *NewGEP = GEP.isInBounds() ?
            Builder->CreateInBoundsGEP(StrippedPtr, Idx, GEP.getName()) :
            Builder->CreateGEP(StrippedPtr, Idx, GEP.getName());
          // The NewGEP must be pointer typed, so must the old one -> BitCast
          return new BitCastInst(NewGEP, GEP.getType());
        }
      }
    }
  }

  /// See if we can simplify:
  ///   X = bitcast A* to B*
  ///   Y = gep X, <...constant indices...>
  /// into a gep of the original struct.  This is important for SROA and alias
  /// analysis of unions.  If "A" is also a bitcast, wait for A/X to be merged.
  if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) {
    if (TD &&
        !isa<BitCastInst>(BCI->getOperand(0)) && GEP.hasAllConstantIndices() &&
        StrippedPtrTy->getAddressSpace() == GEP.getPointerAddressSpace()) {

      // Determine how much the GEP moves the pointer.  We are guaranteed to get
      // a constant back from EmitGEPOffset.
      ConstantInt *OffsetV = cast<ConstantInt>(EmitGEPOffset(&GEP));
      int64_t Offset = OffsetV->getSExtValue();

      // If this GEP instruction doesn't move the pointer, just replace the GEP
      // with a bitcast of the real input to the dest type.
      if (Offset == 0) {
        // If the bitcast is of an allocation, and the allocation will be
        // converted to match the type of the cast, don't touch this.
        if (isa<AllocaInst>(BCI->getOperand(0)) ||
            isMalloc(BCI->getOperand(0))) {
          // See if the bitcast simplifies, if so, don't nuke this GEP yet.
          if (Instruction *I = visitBitCast(*BCI)) {
            if (I != BCI) {
              I->takeName(BCI);
              BCI->getParent()->getInstList().insert(BCI, I);
              ReplaceInstUsesWith(*BCI, I);
            }
            return &GEP;
          }
        }
        return new BitCastInst(BCI->getOperand(0), GEP.getType());
      }

      // Otherwise, if the offset is non-zero, we need to find out if there is a
      // field at Offset in 'A's type.  If so, we can pull the cast through the
      // GEP.
      SmallVector<Value*, 8> NewIndices;
      Type *InTy =
        cast<PointerType>(BCI->getOperand(0)->getType())->getElementType();
      if (FindElementAtOffset(InTy, Offset, NewIndices)) {
        Value *NGEP = GEP.isInBounds() ?
          Builder->CreateInBoundsGEP(BCI->getOperand(0), NewIndices) :
          Builder->CreateGEP(BCI->getOperand(0), NewIndices);

        if (NGEP->getType() == GEP.getType())
          return ReplaceInstUsesWith(GEP, NGEP);
        NGEP->takeName(&GEP);
        return new BitCastInst(NGEP, GEP.getType());
      }
    }
  }

  return 0;
}
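
// For example, the scaled-index case above (an illustrative variant of the
// comment's mul form, using a shift; sizeof(double) is 8) rewrites
//   %idx = shl i64 %n, 3
//   %p = getelementptr i8* bitcast ([100 x double]* @X to i8*), i64 %idx
// into
//   %p1 = getelementptr [100 x double]* @X, i32 0, i64 %n
//   %p = bitcast double* %p1 to i8*
// recovering a typed address from byte-level pointer arithmetic.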

static bool IsOnlyNullComparedAndFreed(Value *V, SmallVectorImpl<WeakVH> &Users,
                                       int Depth = 0) {
  if (Depth == 8)
    return false;

  for (Value::use_iterator UI = V->use_begin(), UE = V->use_end();
       UI != UE; ++UI) {
    User *U = *UI;
    if (isFreeCall(U)) {
      Users.push_back(U);
      continue;
    }
    if (ICmpInst *ICI = dyn_cast<ICmpInst>(U)) {
      if (ICI->isEquality() && isa<ConstantPointerNull>(ICI->getOperand(1))) {
        Users.push_back(ICI);
        continue;
      }
    }
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
      if (IsOnlyNullComparedAndFreed(BCI, Users, Depth+1)) {
        Users.push_back(BCI);
        continue;
      }
    }
    if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      if (IsOnlyNullComparedAndFreed(GEPI, Users, Depth+1)) {
        Users.push_back(GEPI);
        continue;
      }
    }
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
      if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
          II->getIntrinsicID() == Intrinsic::lifetime_end) {
        Users.push_back(II);
        continue;
      }
    }
    return false;
  }
  return true;
}

Instruction *InstCombiner::visitMalloc(Instruction &MI) {
  // If we have a malloc call which is only used in any amount of comparisons
  // to null and free calls, delete the calls and replace the comparisons with
  // true or false as appropriate.
  SmallVector<WeakVH, 64> Users;
  if (IsOnlyNullComparedAndFreed(&MI, Users)) {
    for (unsigned i = 0, e = Users.size(); i != e; ++i) {
      Instruction *I = cast_or_null<Instruction>(&*Users[i]);
      if (!I) continue;

      if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
        ReplaceInstUsesWith(*C,
                            ConstantInt::get(Type::getInt1Ty(C->getContext()),
                                             C->isFalseWhenEqual()));
      } else if (isa<BitCastInst>(I) || isa<GetElementPtrInst>(I)) {
        ReplaceInstUsesWith(*I, UndefValue::get(I->getType()));
      }
      EraseInstFromFunction(*I);
    }
    return EraseInstFromFunction(MI);
  }
  return 0;
}
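
// For example, a malloc whose result is only null-checked and freed can be
// removed entirely:
//   %m = call i8* @malloc(i64 16)
//   %c = icmp eq i8* %m, null
//   ...
//   call void @free(i8* %m)
// The icmp folds to false (the erased allocation is treated as never null),
// and both the malloc and the free calls are deleted.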

Instruction *InstCombiner::visitFree(CallInst &FI) {
  Value *Op = FI.getArgOperand(0);

  // free undef -> unreachable.
  if (isa<UndefValue>(Op)) {
    // Insert a new store to null because we cannot modify the CFG here.
    Builder->CreateStore(ConstantInt::getTrue(FI.getContext()),
                         UndefValue::get(Type::getInt1PtrTy(FI.getContext())));
    return EraseInstFromFunction(FI);
  }

  // If we have 'free null' delete the instruction.  This can happen in stl
  // code when lots of inlining happens.
  if (isa<ConstantPointerNull>(Op))
    return EraseInstFromFunction(FI);

  return 0;
}


Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
  // Change br (not X), label True, label False to: br X, label False, True
  Value *X = 0;
  BasicBlock *TrueDest;
  BasicBlock *FalseDest;
  if (match(&BI, m_Br(m_Not(m_Value(X)), TrueDest, FalseDest)) &&
      !isa<Constant>(X)) {
    // Swap Destinations and condition...
    BI.setCondition(X);
    BI.setSuccessor(0, FalseDest);
    BI.setSuccessor(1, TrueDest);
    return &BI;
  }

  // Canonicalize fcmp_one -> fcmp_oeq
  FCmpInst::Predicate FPred; Value *Y;
  if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)),
                      TrueDest, FalseDest)) &&
      BI.getCondition()->hasOneUse())
    if (FPred == FCmpInst::FCMP_ONE || FPred == FCmpInst::FCMP_OLE ||
        FPred == FCmpInst::FCMP_OGE) {
      FCmpInst *Cond = cast<FCmpInst>(BI.getCondition());
      Cond->setPredicate(FCmpInst::getInversePredicate(FPred));

      // Swap Destinations and condition.
      BI.setSuccessor(0, FalseDest);
      BI.setSuccessor(1, TrueDest);
      Worklist.Add(Cond);
      return &BI;
    }

  // Canonicalize icmp_ne -> icmp_eq
  ICmpInst::Predicate IPred;
  if (match(&BI, m_Br(m_ICmp(IPred, m_Value(X), m_Value(Y)),
                      TrueDest, FalseDest)) &&
      BI.getCondition()->hasOneUse())
    if (IPred == ICmpInst::ICMP_NE  || IPred == ICmpInst::ICMP_ULE ||
        IPred == ICmpInst::ICMP_SLE || IPred == ICmpInst::ICMP_UGE ||
        IPred == ICmpInst::ICMP_SGE) {
      ICmpInst *Cond = cast<ICmpInst>(BI.getCondition());
      Cond->setPredicate(ICmpInst::getInversePredicate(IPred));
      // Swap Destinations and condition.
      BI.setSuccessor(0, FalseDest);
      BI.setSuccessor(1, TrueDest);
      Worklist.Add(Cond);
      return &BI;
    }

  return 0;
}
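
// For example:
//   %c = icmp ne i32 %a, %b
//   br i1 %c, label %t, label %f
// is canonicalized to
//   %c = icmp eq i32 %a, %b
//   br i1 %c, label %f, label %t
// so later passes only have to pattern-match the eq form.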

Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
  Value *Cond = SI.getCondition();
  if (Instruction *I = dyn_cast<Instruction>(Cond)) {
    if (I->getOpcode() == Instruction::Add)
      if (ConstantInt *AddRHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
        // change 'switch (X+4) case 1:' into 'switch (X) case -3'
        for (unsigned i = 2, e = SI.getNumOperands(); i != e; i += 2)
          SI.setOperand(i,
                        ConstantExpr::getSub(cast<Constant>(SI.getOperand(i)),
                                             AddRHS));
        SI.setOperand(0, I->getOperand(0));
        Worklist.Add(I);
        return &SI;
      }
  }
  return 0;
}

Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
  Value *Agg = EV.getAggregateOperand();

  if (!EV.hasIndices())
    return ReplaceInstUsesWith(EV, Agg);

  if (Constant *C = dyn_cast<Constant>(Agg)) {
    if (isa<UndefValue>(C))
      return ReplaceInstUsesWith(EV, UndefValue::get(EV.getType()));

    if (isa<ConstantAggregateZero>(C))
      return ReplaceInstUsesWith(EV, Constant::getNullValue(EV.getType()));

    if (isa<ConstantArray>(C) || isa<ConstantStruct>(C)) {
      // Extract the element indexed by the first index out of the constant
      Value *V = C->getOperand(*EV.idx_begin());
      if (EV.getNumIndices() > 1)
        // Extract the remaining indices out of the constant indexed by the
        // first index
        return ExtractValueInst::Create(V, EV.getIndices().slice(1));
      else
        return ReplaceInstUsesWith(EV, V);
    }
    return 0; // Can't handle other constants
  }
  if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
    // We're extracting from an insertvalue instruction, compare the indices
    const unsigned *exti, *exte, *insi, *inse;
    for (exti = EV.idx_begin(), insi = IV->idx_begin(),
         exte = EV.idx_end(), inse = IV->idx_end();
         exti != exte && insi != inse;
         ++exti, ++insi) {
      if (*insi != *exti)
        // The insert and extract both reference distinctly different elements.
        // This means the extract is not influenced by the insert, and we can
        // replace the aggregate operand of the extract with the aggregate
        // operand of the insert. i.e., replace
        // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
        // %E = extractvalue { i32, { i32 } } %I, 0
        // with
        // %E = extractvalue { i32, { i32 } } %A, 0
        return ExtractValueInst::Create(IV->getAggregateOperand(),
                                        EV.getIndices());
    }
    if (exti == exte && insi == inse)
      // Both iterators are at the end: Index lists are identical. Replace
      // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
      // %C = extractvalue { i32, { i32 } } %B, 1, 0
      // with "i32 42"
      return ReplaceInstUsesWith(EV, IV->getInsertedValueOperand());
    if (exti == exte) {
      // The extract list is a prefix of the insert list. i.e. replace
      // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
      // %E = extractvalue { i32, { i32 } } %I, 1
      // with
      // %X = extractvalue { i32, { i32 } } %A, 1
      // %E = insertvalue { i32 } %X, i32 42, 0
      // by switching the order of the insert and extract (though the
      // insertvalue should be left in, since it may have other uses).
      Value *NewEV = Builder->CreateExtractValue(IV->getAggregateOperand(),
                                                 EV.getIndices());
      return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
                                     makeArrayRef(insi, inse));
    }
    if (insi == inse)
      // The insert list is a prefix of the extract list
      // We can simply remove the common indices from the extract and make it
      // operate on the inserted value instead of the insertvalue result.
      // i.e., replace
      // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
      // %E = extractvalue { i32, { i32 } } %I, 1, 0
      // with
      // %E = extractvalue { i32 } { i32 42 }, 0
      return ExtractValueInst::Create(IV->getInsertedValueOperand(),
                                      makeArrayRef(exti, exte));
  }
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Agg)) {
    // We're extracting from an intrinsic, see if we're the only user, which
    // allows us to simplify multiple result intrinsics to simpler things that
    // just get one value.
    if (II->hasOneUse()) {
      // Check if we're grabbing the overflow bit or the result of a 'with
      // overflow' intrinsic.  If it's the latter we can remove the intrinsic
      // and replace it with a traditional binary instruction.
      switch (II->getIntrinsicID()) {
      case Intrinsic::uadd_with_overflow:
      case Intrinsic::sadd_with_overflow:
        if (*EV.idx_begin() == 0) {  // Normal result.
          Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
          ReplaceInstUsesWith(*II, UndefValue::get(II->getType()));
          EraseInstFromFunction(*II);
          return BinaryOperator::CreateAdd(LHS, RHS);
        }

        // If the normal result of the add is dead, and the RHS is a constant,
        // we can transform this into a range comparison.
        // overflow = uadd a, -4  -->  overflow = icmp ugt a, 3
        if (II->getIntrinsicID() == Intrinsic::uadd_with_overflow)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getArgOperand(1)))
            return new ICmpInst(ICmpInst::ICMP_UGT, II->getArgOperand(0),
                                ConstantExpr::getNot(CI));
        break;
      case Intrinsic::usub_with_overflow:
      case Intrinsic::ssub_with_overflow:
        if (*EV.idx_begin() == 0) {  // Normal result.
          Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
          ReplaceInstUsesWith(*II, UndefValue::get(II->getType()));
          EraseInstFromFunction(*II);
          return BinaryOperator::CreateSub(LHS, RHS);
        }
        break;
      case Intrinsic::umul_with_overflow:
      case Intrinsic::smul_with_overflow:
        if (*EV.idx_begin() == 0) {  // Normal result.
          Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
          ReplaceInstUsesWith(*II, UndefValue::get(II->getType()));
          EraseInstFromFunction(*II);
          return BinaryOperator::CreateMul(LHS, RHS);
        }
        break;
      default:
        break;
      }
    }
  }
  if (LoadInst *L = dyn_cast<LoadInst>(Agg))
    // If the (non-volatile) load only has one use, we can rewrite this to a
    // load from a GEP. This reduces the size of the load.
    // FIXME: If a load is used only by extractvalue instructions then this
    //        could be done regardless of having multiple uses.
    if (L->isSimple() && L->hasOneUse()) {
      // extractvalue has integer indices, getelementptr has Value*s. Convert.
      SmallVector<Value*, 4> Indices;
      // Prefix an i32 0 since we need the first element.
      Indices.push_back(Builder->getInt32(0));
      for (ExtractValueInst::idx_iterator I = EV.idx_begin(), E = EV.idx_end();
           I != E; ++I)
        Indices.push_back(Builder->getInt32(*I));

      // We need to insert these at the location of the old load, not at that
      // of the extractvalue.
      Builder->SetInsertPoint(L->getParent(), L);
      Value *GEP = Builder->CreateInBoundsGEP(L->getPointerOperand(), Indices);
      // Returning the load directly will cause the main loop to insert it in
      // the wrong spot, so use ReplaceInstUsesWith().
      return ReplaceInstUsesWith(EV, Builder->CreateLoad(GEP));
    }
  // We could simplify extracts from other values. Note that nested extracts may
  // already be simplified implicitly by the above: extract (extract (insert) )
  // will be translated into extract ( insert ( extract ) ) first and then just
  // the value inserted, if appropriate. Similarly for extracts from single-use
  // loads: extract (extract (load)) will be translated to extract (load (gep))
  // and if again single-use then via load (gep (gep)) to load (gep).
  // However, double extracts from e.g. function arguments or return values
  // aren't handled yet.
  return 0;
}


/// TryToSinkInstruction - Try to move the specified instruction from its
/// current block into the beginning of DestBlock, which can only happen if it's
/// safe to move the instruction past all of the instructions between it and the
/// end of its block.
static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
  assert(I->hasOneUse() && "Invariants didn't hold!");

  // Cannot move control-flow-involving, volatile loads, vaarg, etc.
  if (isa<PHINode>(I) || isa<LandingPadInst>(I) || I->mayHaveSideEffects() ||
      isa<TerminatorInst>(I))
    return false;

  // Do not sink alloca instructions out of the entry block.
  if (isa<AllocaInst>(I) && I->getParent() ==
        &DestBlock->getParent()->getEntryBlock())
    return false;

  // We can only sink load instructions if there is nothing between the load and
  // the end of block that could change the value.
  if (I->mayReadFromMemory()) {
    for (BasicBlock::iterator Scan = I, E = I->getParent()->end();
         Scan != E; ++Scan)
      if (Scan->mayWriteToMemory())
        return false;
  }

  BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt();
  I->moveBefore(InsertPos);
  ++NumSunkInst;
  return true;
}


/// AddReachableCodeToWorklist - Walk the function in depth-first order, adding
/// all reachable code to the worklist.
///
/// This has a couple of tricks to make the code faster and more powerful.  In
/// particular, we constant fold and DCE instructions as we go, to avoid adding
/// them to the worklist (this significantly speeds up instcombine on code where
/// many instructions are dead or constant).  Additionally, if we find a branch
/// whose condition is a known constant, we only visit the reachable successors.
///
static bool AddReachableCodeToWorklist(BasicBlock *BB,
                                       SmallPtrSet<BasicBlock*, 64> &Visited,
                                       InstCombiner &IC,
                                       const TargetData *TD) {
  bool MadeIRChange = false;
  SmallVector<BasicBlock*, 256> Worklist;
  Worklist.push_back(BB);

  SmallVector<Instruction*, 128> InstrsForInstCombineWorklist;
  DenseMap<ConstantExpr*, Constant*> FoldedConstants;

  do {
    BB = Worklist.pop_back_val();

    // We have now visited this block!  If we've already been here, ignore it.
    if (!Visited.insert(BB)) continue;

    for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
      Instruction *Inst = BBI++;

      // DCE instruction if trivially dead.
      if (isInstructionTriviallyDead(Inst)) {
        ++NumDeadInst;
        DEBUG(errs() << "IC: DCE: " << *Inst << '\n');
        Inst->eraseFromParent();
        continue;
      }

      // ConstantProp instruction if trivially constant.
      if (!Inst->use_empty() && isa<Constant>(Inst->getOperand(0)))
        if (Constant *C = ConstantFoldInstruction(Inst, TD)) {
          DEBUG(errs() << "IC: ConstFold to: " << *C << " from: "
                       << *Inst << '\n');
          Inst->replaceAllUsesWith(C);
          ++NumConstProp;
          Inst->eraseFromParent();
          continue;
        }

      if (TD) {
        // See if we can constant fold its operands.
        for (User::op_iterator i = Inst->op_begin(), e = Inst->op_end();
             i != e; ++i) {
          ConstantExpr *CE = dyn_cast<ConstantExpr>(i);
          if (CE == 0) continue;

          Constant*& FoldRes = FoldedConstants[CE];
          if (!FoldRes)
            FoldRes = ConstantFoldConstantExpression(CE, TD);
          if (!FoldRes)
            FoldRes = CE;

          if (FoldRes != CE) {
            *i = FoldRes;
            MadeIRChange = true;
          }
        }
      }

      InstrsForInstCombineWorklist.push_back(Inst);
    }

    // Recursively visit successors.  If this is a branch or switch on a
    // constant, only visit the reachable successor.
    TerminatorInst *TI = BB->getTerminator();
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) {
        bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue();
        BasicBlock *ReachableBB = BI->getSuccessor(!CondVal);
        Worklist.push_back(ReachableBB);
        continue;
      }
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
      if (ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
        // See if this is an explicit destination.
        for (unsigned i = 1, e = SI->getNumSuccessors(); i != e; ++i)
          if (SI->getCaseValue(i) == Cond) {
            BasicBlock *ReachableBB = SI->getSuccessor(i);
            Worklist.push_back(ReachableBB);
            continue;
          }

        // Otherwise it is the default destination.
        Worklist.push_back(SI->getSuccessor(0));
        continue;
      }
    }

    for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
      Worklist.push_back(TI->getSuccessor(i));
  } while (!Worklist.empty());

  // Once we've found all of the instructions to add to instcombine's worklist,
  // add them in reverse order.  This way instcombine will visit from the top
  // of the function down.  This jives well with the way that it adds all uses
  // of instructions to the worklist after doing a transformation, thus avoiding
  // some N^2 behavior in pathological cases.
  IC.Worklist.AddInitialGroup(&InstrsForInstCombineWorklist[0],
                              InstrsForInstCombineWorklist.size());

  return MadeIRChange;
}
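
// For example, given "br i1 true, label %live, label %dead", only %live is
// pushed onto the block worklist, so instructions in %dead (unless it is
// reachable some other way) are never added to instcombine's worklist at all.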
  IC.Worklist.AddInitialGroup(&InstrsForInstCombineWorklist[0],
                              InstrsForInstCombineWorklist.size());

  return MadeIRChange;
}

bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
  MadeIRChange = false;

  DEBUG(errs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
               << F.getNameStr() << "\n");

  {
    // Do a depth-first traversal of the function, populating the worklist
    // with the reachable instructions. Ignore blocks that are not reachable.
    // Keep track of which blocks we visit.
    SmallPtrSet<BasicBlock*, 64> Visited;
    MadeIRChange |= AddReachableCodeToWorklist(F.begin(), Visited, *this, TD);

    // Do a quick scan over the function. If we find any blocks that are
    // unreachable, remove any instructions inside of them. This prevents
    // the instcombine code from having to deal with some bad special cases.
    for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
      if (!Visited.count(BB)) {
        Instruction *Term = BB->getTerminator();

        // If the block contains only its terminator, there is nothing to do.
        if (isa<TerminatorInst>(BB->begin()))
          continue;

        // Delete the instructions backwards to reduce the number of def-use
        // and use-def chain updates.
        std::vector<Instruction*> WorkList;
        WorkList.reserve(BB->size());
        BasicBlock::iterator I = Term; --I;

        while (true) {
          if (!I->getType()->isVoidTy())
            I->replaceAllUsesWith(UndefValue::get(I->getType()));
          WorkList.push_back(I);
          if (I == BB->begin())
            break;
          --I;
        }

        for (std::vector<Instruction*>::iterator
             II = WorkList.begin(), IE = WorkList.end(); II != IE; ++II) {
          Instruction *Inst = *II;
          // Don't remove the landing pad. It should be removed only when its
          // invokes are removed.
          if (isa<LandingPadInst>(Inst))
            continue;

          // A debug intrinsic shouldn't force another iteration if we weren't
          // going to do one without it.
          if (!isa<DbgInfoIntrinsic>(Inst)) {
            ++NumDeadInst;
            MadeIRChange = true;
          }

          Inst->eraseFromParent();
        }
      }
  }

  while (!Worklist.isEmpty()) {
    Instruction *I = Worklist.RemoveOne();
    if (I == 0) continue;  // Skip null values.

    // Check to see if we can DCE the instruction.
    if (isInstructionTriviallyDead(I)) {
      DEBUG(errs() << "IC: DCE: " << *I << '\n');
      EraseInstFromFunction(*I);
      ++NumDeadInst;
      MadeIRChange = true;
      continue;
    }

    // The instruction isn't dead; see if we can constant-propagate it.
    if (!I->use_empty() && isa<Constant>(I->getOperand(0)))
      if (Constant *C = ConstantFoldInstruction(I, TD)) {
        DEBUG(errs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');

        // Add operands to the worklist.
        ReplaceInstUsesWith(*I, C);
        ++NumConstProp;
        EraseInstFromFunction(*I);
        MadeIRChange = true;
        continue;
      }

    // See if we can trivially sink this instruction to a successor basic
    // block.
    if (I->hasOneUse()) {
      BasicBlock *BB = I->getParent();
      Instruction *UserInst = cast<Instruction>(I->use_back());
      BasicBlock *UserParent;

      // Get the block the use occurs in.
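      // For a PHI user, the use is considered to occur in the corresponding
      // incoming block rather than in the PHI's own block. Illustrative IR
      // (hypothetical, not from a test):
      //   %p = phi i32 [ %val, %pred ], [ 0, %other ]
      // Here the use of %val occurs in %pred, so %pred is the candidate
      // block to sink into.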
      if (PHINode *PN = dyn_cast<PHINode>(UserInst))
        UserParent = PN->getIncomingBlock(I->use_begin().getUse());
      else
        UserParent = UserInst->getParent();

      if (UserParent != BB) {
        bool UserIsSuccessor = false;
        // See if the user is one of our successors.
        for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI)
          if (*SI == UserParent) {
            UserIsSuccessor = true;
            break;
          }

        // If the user is one of our immediate successors, and if that
        // successor only has us as a predecessor (we'd have to split the
        // critical edge otherwise), we can keep going.
        if (UserIsSuccessor && UserParent->getSinglePredecessor())
          // Okay, the CFG is simple enough, try to sink this instruction.
          MadeIRChange |= TryToSinkInstruction(I, UserParent);
      }
    }

    // Now that we have an instruction, try combining it to simplify it.
    Builder->SetInsertPoint(I->getParent(), I);
    Builder->SetCurrentDebugLocation(I->getDebugLoc());

#ifndef NDEBUG
    std::string OrigI;
#endif
    DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str(););
    DEBUG(errs() << "IC: Visiting: " << OrigI << '\n');

    if (Instruction *Result = visit(*I)) {
      ++NumCombined;
      // Should we replace the old instruction with a new one?
      if (Result != I) {
        DEBUG(errs() << "IC: Old = " << *I << '\n'
                     << "    New = " << *Result << '\n');

        if (!I->getDebugLoc().isUnknown())
          Result->setDebugLoc(I->getDebugLoc());
        // Everything uses the new instruction now.
        I->replaceAllUsesWith(Result);

        // Push the new instruction and any users onto the worklist.
        Worklist.Add(Result);
        Worklist.AddUsersToWorkList(*Result);

        // Move the name to the new instruction first.
        Result->takeName(I);

        // Insert the new instruction into the basic block...
        BasicBlock *InstParent = I->getParent();
        BasicBlock::iterator InsertPos = I;

        if (!isa<PHINode>(Result))        // If combining a PHI, don't insert
          while (isa<PHINode>(InsertPos)) // it in the middle of a block of
            ++InsertPos;                  // PHIs.

        InstParent->getInstList().insert(InsertPos, Result);

        EraseInstFromFunction(*I);
      } else {
#ifndef NDEBUG
        DEBUG(errs() << "IC: Mod = " << OrigI << '\n'
                     << "    New = " << *I << '\n');
#endif

        // If the instruction was modified, it's possible that it is now dead.
        // If so, remove it.
        if (isInstructionTriviallyDead(I)) {
          EraseInstFromFunction(*I);
        } else {
          Worklist.Add(I);
          Worklist.AddUsersToWorkList(*I);
        }
      }
      MadeIRChange = true;
    }
  }

  Worklist.Zap();
  return MadeIRChange;
}


bool InstCombiner::runOnFunction(Function &F) {
  TD = getAnalysisIfAvailable<TargetData>();

  /// Builder - This is an IRBuilder that automatically inserts new
  /// instructions into the worklist when they are created.
  IRBuilder<true, TargetFolder, InstCombineIRInserter>
    TheBuilder(F.getContext(), TargetFolder(TD),
               InstCombineIRInserter(Worklist));
  Builder = &TheBuilder;

  bool EverMadeChange = false;

  // Lower dbg.declare intrinsics; otherwise their value may be clobbered
  // by the instcombiner.
  EverMadeChange = LowerDbgDeclare(F);

  // Iterate while there is work to do.
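  // (DoOneIteration returns true iff it changed the IR; iterating until it
  // returns false runs instcombine to a fixed point over the function.)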
  unsigned Iteration = 0;
  while (DoOneIteration(F, Iteration++))
    EverMadeChange = true;

  Builder = 0;
  return EverMadeChange;
}

FunctionPass *llvm::createInstructionCombiningPass() {
  return new InstCombiner();
}
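
// Illustrative usage only (not part of this file): with the legacy pass
// manager of this LLVM revision, the pass would typically be scheduled as:
//
//   PassManager PM;                           // from llvm/PassManager.h
//   PM.add(createInstructionCombiningPass());
//   bool Changed = PM.run(M);                 // M is a llvm::Module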