InstructionCombining.cpp revision 192228edb1c08ca11da2df959072bcaa99eacd63
1//===- InstructionCombining.cpp - Combine multiple instructions -----------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// InstructionCombining - Combine instructions to form fewer, simple 11// instructions. This pass does not modify the CFG. This pass is where 12// algebraic simplification happens. 13// 14// This pass combines things like: 15// %Y = add i32 %X, 1 16// %Z = add i32 %Y, 1 17// into: 18// %Z = add i32 %X, 2 19// 20// This is a simple worklist driven algorithm. 21// 22// This pass guarantees that the following canonicalizations are performed on 23// the program: 24// 1. If a binary operator has a constant operand, it is moved to the RHS 25// 2. Bitwise operators with constant operands are always grouped so that 26// shifts are performed first, then or's, then and's, then xor's. 27// 3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible 28// 4. All cmp instructions on boolean values are replaced with logical ops 29// 5. add X, X is represented as (X*2) => (X << 1) 30// 6. Multiplies with a power-of-two constant argument are transformed into 31// shifts. 32// ... etc. 33// 34//===----------------------------------------------------------------------===// 35 36#define DEBUG_TYPE "instcombine" 37#include "llvm/Transforms/Scalar.h" 38#include "InstCombine.h" 39#include "llvm/IntrinsicInst.h" 40#include "llvm/Analysis/ConstantFolding.h" 41#include "llvm/Analysis/InstructionSimplify.h" 42#include "llvm/Analysis/MemoryBuiltins.h" 43#include "llvm/Target/TargetData.h" 44#include "llvm/Transforms/Utils/Local.h" 45#include "llvm/Support/CFG.h" 46#include "llvm/Support/Debug.h" 47#include "llvm/Support/GetElementPtrTypeIterator.h" 48#include "llvm/Support/PatternMatch.h" 49#include "llvm/ADT/SmallPtrSet.h" 50#include "llvm/ADT/Statistic.h" 51#include "llvm-c/Initialization.h" 52#include <algorithm> 53#include <climits> 54using namespace llvm; 55using namespace llvm::PatternMatch; 56 57STATISTIC(NumCombined , "Number of insts combined"); 58STATISTIC(NumConstProp, "Number of constant folds"); 59STATISTIC(NumDeadInst , "Number of dead inst eliminated"); 60STATISTIC(NumSunkInst , "Number of instructions sunk"); 61STATISTIC(NumExpand, "Number of expansions"); 62STATISTIC(NumFactor , "Number of factorizations"); 63STATISTIC(NumReassoc , "Number of reassociations"); 64 65// Initialization Routines 66void llvm::initializeInstCombine(PassRegistry &Registry) { 67 initializeInstCombinerPass(Registry); 68} 69 70void LLVMInitializeInstCombine(LLVMPassRegistryRef R) { 71 initializeInstCombine(*unwrap(R)); 72} 73 74char InstCombiner::ID = 0; 75INITIALIZE_PASS(InstCombiner, "instcombine", 76 "Combine redundant instructions", false, false) 77 78void InstCombiner::getAnalysisUsage(AnalysisUsage &AU) const { 79 AU.addPreservedID(LCSSAID); 80 AU.setPreservesCFG(); 81} 82 83 84/// ShouldChangeType - Return true if it is desirable to convert a computation 85/// from 'From' to 'To'. We don't want to convert from a legal to an illegal 86/// type for example, or from a smaller to a larger illegal type. 87bool InstCombiner::ShouldChangeType(const Type *From, const Type *To) const { 88 assert(From->isIntegerTy() && To->isIntegerTy()); 89 90 // If we don't have TD, we don't know if the source/dest are legal. 
91 if (!TD) return false; 92 93 unsigned FromWidth = From->getPrimitiveSizeInBits(); 94 unsigned ToWidth = To->getPrimitiveSizeInBits(); 95 bool FromLegal = TD->isLegalInteger(FromWidth); 96 bool ToLegal = TD->isLegalInteger(ToWidth); 97 98 // If this is a legal integer from type, and the result would be an illegal 99 // type, don't do the transformation. 100 if (FromLegal && !ToLegal) 101 return false; 102 103 // Otherwise, if both are illegal, do not increase the size of the result. We 104 // do allow things like i160 -> i64, but not i64 -> i160. 105 if (!FromLegal && !ToLegal && ToWidth > FromWidth) 106 return false; 107 108 return true; 109} 110 111 112/// SimplifyAssociativeOrCommutative - This performs a few simplifications for 113/// operators which are associative or commutative: 114// 115// Commutative operators: 116// 117// 1. Order operands such that they are listed from right (least complex) to 118// left (most complex). This puts constants before unary operators before 119// binary operators. 120// 121// Associative operators: 122// 123// 2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies. 124// 3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies. 125// 126// Associative and commutative operators: 127// 128// 4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies. 129// 5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies. 130// 6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)" 131// if C1 and C2 are constants. 132// 133bool InstCombiner::SimplifyAssociativeOrCommutative(BinaryOperator &I) { 134 Instruction::BinaryOps Opcode = I.getOpcode(); 135 bool Changed = false; 136 137 do { 138 // Order operands such that they are listed from right (least complex) to 139 // left (most complex). This puts constants before unary operators before 140 // binary operators. 141 if (I.isCommutative() && getComplexity(I.getOperand(0)) < 142 getComplexity(I.getOperand(1))) 143 Changed = !I.swapOperands(); 144 145 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0)); 146 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1)); 147 148 if (I.isAssociative()) { 149 // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies. 150 if (Op0 && Op0->getOpcode() == Opcode) { 151 Value *A = Op0->getOperand(0); 152 Value *B = Op0->getOperand(1); 153 Value *C = I.getOperand(1); 154 155 // Does "B op C" simplify? 156 if (Value *V = SimplifyBinOp(Opcode, B, C, TD)) { 157 // It simplifies to V. Form "A op V". 158 I.setOperand(0, A); 159 I.setOperand(1, V); 160 Changed = true; 161 ++NumReassoc; 162 continue; 163 } 164 } 165 166 // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies. 167 if (Op1 && Op1->getOpcode() == Opcode) { 168 Value *A = I.getOperand(0); 169 Value *B = Op1->getOperand(0); 170 Value *C = Op1->getOperand(1); 171 172 // Does "A op B" simplify? 173 if (Value *V = SimplifyBinOp(Opcode, A, B, TD)) { 174 // It simplifies to V. Form "V op C". 175 I.setOperand(0, V); 176 I.setOperand(1, C); 177 Changed = true; 178 ++NumReassoc; 179 continue; 180 } 181 } 182 } 183 184 if (I.isAssociative() && I.isCommutative()) { 185 // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies. 186 if (Op0 && Op0->getOpcode() == Opcode) { 187 Value *A = Op0->getOperand(0); 188 Value *B = Op0->getOperand(1); 189 Value *C = I.getOperand(1); 190 191 // Does "C op A" simplify? 192 if (Value *V = SimplifyBinOp(Opcode, C, A, TD)) { 193 // It simplifies to V. 
Form "V op B". 194 I.setOperand(0, V); 195 I.setOperand(1, B); 196 Changed = true; 197 ++NumReassoc; 198 continue; 199 } 200 } 201 202 // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies. 203 if (Op1 && Op1->getOpcode() == Opcode) { 204 Value *A = I.getOperand(0); 205 Value *B = Op1->getOperand(0); 206 Value *C = Op1->getOperand(1); 207 208 // Does "C op A" simplify? 209 if (Value *V = SimplifyBinOp(Opcode, C, A, TD)) { 210 // It simplifies to V. Form "B op V". 211 I.setOperand(0, B); 212 I.setOperand(1, V); 213 Changed = true; 214 ++NumReassoc; 215 continue; 216 } 217 } 218 219 // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)" 220 // if C1 and C2 are constants. 221 if (Op0 && Op1 && 222 Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode && 223 isa<Constant>(Op0->getOperand(1)) && 224 isa<Constant>(Op1->getOperand(1)) && 225 Op0->hasOneUse() && Op1->hasOneUse()) { 226 Value *A = Op0->getOperand(0); 227 Constant *C1 = cast<Constant>(Op0->getOperand(1)); 228 Value *B = Op1->getOperand(0); 229 Constant *C2 = cast<Constant>(Op1->getOperand(1)); 230 231 Constant *Folded = ConstantExpr::get(Opcode, C1, C2); 232 Instruction *New = BinaryOperator::Create(Opcode, A, B, Op1->getName(), 233 &I); 234 Worklist.Add(New); 235 I.setOperand(0, New); 236 I.setOperand(1, Folded); 237 Changed = true; 238 continue; 239 } 240 } 241 242 // No further simplifications. 243 return Changed; 244 } while (1); 245} 246 247/// LeftDistributesOverRight - Whether "X LOp (Y ROp Z)" is always equal to 248/// "(X LOp Y) ROp (X LOp Z)". 249static bool LeftDistributesOverRight(Instruction::BinaryOps LOp, 250 Instruction::BinaryOps ROp) { 251 switch (LOp) { 252 default: 253 return false; 254 255 case Instruction::And: 256 // And distributes over Or and Xor. 257 switch (ROp) { 258 default: 259 return false; 260 case Instruction::Or: 261 case Instruction::Xor: 262 return true; 263 } 264 265 case Instruction::Mul: 266 // Multiplication distributes over addition and subtraction. 267 switch (ROp) { 268 default: 269 return false; 270 case Instruction::Add: 271 case Instruction::Sub: 272 return true; 273 } 274 275 case Instruction::Or: 276 // Or distributes over And. 277 switch (ROp) { 278 default: 279 return false; 280 case Instruction::And: 281 return true; 282 } 283 } 284} 285 286/// RightDistributesOverLeft - Whether "(X LOp Y) ROp Z" is always equal to 287/// "(X ROp Z) LOp (Y ROp Z)". 288static bool RightDistributesOverLeft(Instruction::BinaryOps LOp, 289 Instruction::BinaryOps ROp) { 290 if (Instruction::isCommutative(ROp)) 291 return LeftDistributesOverRight(ROp, LOp); 292 // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z", 293 // but this requires knowing that the addition does not overflow and other 294 // such subtleties. 295 return false; 296} 297 298/// SimplifyUsingDistributiveLaws - This tries to simplify binary operations 299/// which some other binary operation distributes over either by factorizing 300/// out common terms (eg "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this 301/// results in simplifications (eg: "A & (B | C) -> (A&B) | (A&C)" if this is 302/// a win). Returns the simplified value, or null if it didn't simplify. 
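// As a concrete illustration of the factorization case (this example is not
// part of the original source): with 'add' as the top-level opcode and 'mul'
// as the inner opcode,
//   %t0 = mul i32 %x, %y
//   %t1 = mul i32 %x, %z
//   %r  = add i32 %t0, %t1
// is factorized into
//   %s = add i32 %y, %z
//   %r = mul i32 %x, %s
// provided the multiplies have no other uses (or "%y + %z" folds away).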
303Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) { 304 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1); 305 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS); 306 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS); 307 Instruction::BinaryOps TopLevelOpcode = I.getOpcode(); // op 308 309 // Factorization. 310 if (Op0 && Op1 && Op0->getOpcode() == Op1->getOpcode()) { 311 // The instruction has the form "(A op' B) op (C op' D)". Try to factorize 312 // a common term. 313 Value *A = Op0->getOperand(0), *B = Op0->getOperand(1); 314 Value *C = Op1->getOperand(0), *D = Op1->getOperand(1); 315 Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op' 316 317 // Does "X op' Y" always equal "Y op' X"? 318 bool InnerCommutative = Instruction::isCommutative(InnerOpcode); 319 320 // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"? 321 if (LeftDistributesOverRight(InnerOpcode, TopLevelOpcode)) 322 // Does the instruction have the form "(A op' B) op (A op' D)" or, in the 323 // commutative case, "(A op' B) op (C op' A)"? 324 if (A == C || (InnerCommutative && A == D)) { 325 if (A != C) 326 std::swap(C, D); 327 // Consider forming "A op' (B op D)". 328 // If "B op D" simplifies then it can be formed with no cost. 329 Value *V = SimplifyBinOp(TopLevelOpcode, B, D, TD); 330 // If "B op D" doesn't simplify then only go on if both of the existing 331 // operations "A op' B" and "C op' D" will be zapped as no longer used. 332 if (!V && Op0->hasOneUse() && Op1->hasOneUse()) 333 V = Builder->CreateBinOp(TopLevelOpcode, B, D, Op1->getName()); 334 if (V) { 335 ++NumFactor; 336 V = Builder->CreateBinOp(InnerOpcode, A, V); 337 V->takeName(&I); 338 return V; 339 } 340 } 341 342 // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"? 343 if (RightDistributesOverLeft(TopLevelOpcode, InnerOpcode)) 344 // Does the instruction have the form "(A op' B) op (C op' B)" or, in the 345 // commutative case, "(A op' B) op (B op' D)"? 346 if (B == D || (InnerCommutative && B == C)) { 347 if (B != D) 348 std::swap(C, D); 349 // Consider forming "(A op C) op' B". 350 // If "A op C" simplifies then it can be formed with no cost. 351 Value *V = SimplifyBinOp(TopLevelOpcode, A, C, TD); 352 // If "A op C" doesn't simplify then only go on if both of the existing 353 // operations "A op' B" and "C op' D" will be zapped as no longer used. 354 if (!V && Op0->hasOneUse() && Op1->hasOneUse()) 355 V = Builder->CreateBinOp(TopLevelOpcode, A, C, Op0->getName()); 356 if (V) { 357 ++NumFactor; 358 V = Builder->CreateBinOp(InnerOpcode, V, B); 359 V->takeName(&I); 360 return V; 361 } 362 } 363 } 364 365 // Expansion. 366 if (Op0 && RightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) { 367 // The instruction has the form "(A op' B) op C". See if expanding it out 368 // to "(A op C) op' (B op C)" results in simplifications. 369 Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS; 370 Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op' 371 372 // Do "A op C" and "B op C" both simplify? 373 if (Value *L = SimplifyBinOp(TopLevelOpcode, A, C, TD)) 374 if (Value *R = SimplifyBinOp(TopLevelOpcode, B, C, TD)) { 375 // They do! Return "L op' R". 376 ++NumExpand; 377 // If "L op' R" equals "A op' B" then "L op' R" is just the LHS. 378 if ((L == A && R == B) || 379 (Instruction::isCommutative(InnerOpcode) && L == B && R == A)) 380 return Op0; 381 // Otherwise return "L op' R" if it simplifies. 
382 if (Value *V = SimplifyBinOp(InnerOpcode, L, R, TD)) 383 return V; 384 // Otherwise, create a new instruction. 385 C = Builder->CreateBinOp(InnerOpcode, L, R); 386 C->takeName(&I); 387 return C; 388 } 389 } 390 391 if (Op1 && LeftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) { 392 // The instruction has the form "A op (B op' C)". See if expanding it out 393 // to "(A op B) op' (A op C)" results in simplifications. 394 Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1); 395 Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op' 396 397 // Do "A op B" and "A op C" both simplify? 398 if (Value *L = SimplifyBinOp(TopLevelOpcode, A, B, TD)) 399 if (Value *R = SimplifyBinOp(TopLevelOpcode, A, C, TD)) { 400 // They do! Return "L op' R". 401 ++NumExpand; 402 // If "L op' R" equals "B op' C" then "L op' R" is just the RHS. 403 if ((L == B && R == C) || 404 (Instruction::isCommutative(InnerOpcode) && L == C && R == B)) 405 return Op1; 406 // Otherwise return "L op' R" if it simplifies. 407 if (Value *V = SimplifyBinOp(InnerOpcode, L, R, TD)) 408 return V; 409 // Otherwise, create a new instruction. 410 A = Builder->CreateBinOp(InnerOpcode, L, R); 411 A->takeName(&I); 412 return A; 413 } 414 } 415 416 return 0; 417} 418 419// dyn_castNegVal - Given a 'sub' instruction, return the RHS of the instruction 420// if the LHS is a constant zero (which is the 'negate' form). 421// 422Value *InstCombiner::dyn_castNegVal(Value *V) const { 423 if (BinaryOperator::isNeg(V)) 424 return BinaryOperator::getNegArgument(V); 425 426 // Constants can be considered to be negated values if they can be folded. 427 if (ConstantInt *C = dyn_cast<ConstantInt>(V)) 428 return ConstantExpr::getNeg(C); 429 430 if (ConstantVector *C = dyn_cast<ConstantVector>(V)) 431 if (C->getType()->getElementType()->isIntegerTy()) 432 return ConstantExpr::getNeg(C); 433 434 return 0; 435} 436 437// dyn_castFNegVal - Given a 'fsub' instruction, return the RHS of the 438// instruction if the LHS is a constant negative zero (which is the 'negate' 439// form). 440// 441Value *InstCombiner::dyn_castFNegVal(Value *V) const { 442 if (BinaryOperator::isFNeg(V)) 443 return BinaryOperator::getFNegArgument(V); 444 445 // Constants can be considered to be negated values if they can be folded. 446 if (ConstantFP *C = dyn_cast<ConstantFP>(V)) 447 return ConstantExpr::getFNeg(C); 448 449 if (ConstantVector *C = dyn_cast<ConstantVector>(V)) 450 if (C->getType()->getElementType()->isFloatingPointTy()) 451 return ConstantExpr::getFNeg(C); 452 453 return 0; 454} 455 456static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO, 457 InstCombiner *IC) { 458 if (CastInst *CI = dyn_cast<CastInst>(&I)) 459 return IC->Builder->CreateCast(CI->getOpcode(), SO, I.getType()); 460 461 // Figure out if the constant is the left or the right argument. 
  bool ConstIsRHS = isa<Constant>(I.getOperand(1));
  Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS));

  if (Constant *SOC = dyn_cast<Constant>(SO)) {
    if (ConstIsRHS)
      return ConstantExpr::get(I.getOpcode(), SOC, ConstOperand);
    return ConstantExpr::get(I.getOpcode(), ConstOperand, SOC);
  }

  Value *Op0 = SO, *Op1 = ConstOperand;
  if (!ConstIsRHS)
    std::swap(Op0, Op1);

  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
    return IC->Builder->CreateBinOp(BO->getOpcode(), Op0, Op1,
                                    SO->getName()+".op");
  if (ICmpInst *CI = dyn_cast<ICmpInst>(&I))
    return IC->Builder->CreateICmp(CI->getPredicate(), Op0, Op1,
                                   SO->getName()+".cmp");
  if (FCmpInst *CI = dyn_cast<FCmpInst>(&I))
    return IC->Builder->CreateFCmp(CI->getPredicate(), Op0, Op1,
                                   SO->getName()+".cmp");
  llvm_unreachable("Unknown binary instruction type!");
}

// FoldOpIntoSelect - Given an instruction with a select as one operand and a
// constant as the other operand, try to fold the binary operator into the
// select arguments.  This also works for Cast instructions, which obviously do
// not have a second operand.
Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
  // Don't modify shared select instructions.
  if (!SI->hasOneUse()) return 0;
  Value *TV = SI->getOperand(1);
  Value *FV = SI->getOperand(2);

  if (isa<Constant>(TV) || isa<Constant>(FV)) {
    // Bool selects with constant operands can be folded to logical ops.
    if (SI->getType()->isIntegerTy(1)) return 0;

    Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, this);
    Value *SelectFalseVal = FoldOperationIntoSelectOperand(Op, FV, this);

    return SelectInst::Create(SI->getCondition(), SelectTrueVal,
                              SelectFalseVal);
  }
  return 0;
}


/// FoldOpIntoPhi - Given a binary operator, cast instruction, or select which
/// has a PHI node as operand #0, see if we can fold the instruction into the
/// PHI (which is only possible if all operands to the PHI are constants).
///
Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
  PHINode *PN = cast<PHINode>(I.getOperand(0));
  unsigned NumPHIValues = PN->getNumIncomingValues();
  if (NumPHIValues == 0)
    return 0;

  // We normally only transform phis with a single use, unless we're trying
  // hard to make jump threading happen.  However, if a PHI has multiple uses
  // and they are all the same operation, we can fold *all* of the uses into the
  // PHI.
  if (!PN->hasOneUse()) {
    // Walk the use list for the instruction, comparing them to I.
    for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end();
         UI != E; ++UI)
      if (!I.isIdenticalTo(cast<Instruction>(*UI)))
        return 0;
    // Otherwise, we can replace *all* users with the new PHI we form.
  }

  // Check to see if all of the operands of the PHI are simple constants
  // (constantint/constantfp/undef).  If there is one non-constant value,
  // remember the BB it is in.  If there is more than one or if *it* is a PHI,
  // bail out.  We don't do arbitrary constant expressions here because moving
  // their computation can be expensive without a cost model.
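  // Illustrative example of the overall transformation (not part of the
  // original source), for the binary-operator case:
  //   %p = phi i32 [ 1, %bb1 ], [ %x, %bb2 ]
  //   %r = add i32 %p, 4
  // becomes
  //   %phitmp = add i32 %x, 4      ; inserted before the terminator of %bb2
  //   %r = phi i32 [ 5, %bb1 ], [ %phitmp, %bb2 ]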
  BasicBlock *NonConstBB = 0;
  for (unsigned i = 0; i != NumPHIValues; ++i) {
    Value *InVal = PN->getIncomingValue(i);
    if (isa<Constant>(InVal) && !isa<ConstantExpr>(InVal))
      continue;

    if (isa<PHINode>(InVal)) return 0;  // Itself a phi.
    if (NonConstBB) return 0;  // More than one non-const value.

    NonConstBB = PN->getIncomingBlock(i);

    // If the InVal is an invoke at the end of the pred block, then we can't
    // insert a computation after it without breaking the edge.
    if (InvokeInst *II = dyn_cast<InvokeInst>(InVal))
      if (II->getParent() == NonConstBB)
        return 0;

    // If the incoming non-constant value is in I's block, we have an infinite
    // loop.
    if (NonConstBB == I.getParent())
      return 0;
  }

  // If there is exactly one non-constant value, we can insert a copy of the
  // operation in that block.  However, if this is a critical edge, we would be
  // inserting the computation on some other paths (e.g. inside a loop).  Only
  // do this if the pred block is unconditionally branching into the phi block.
  if (NonConstBB != 0) {
    BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator());
    if (!BI || !BI->isUnconditional()) return 0;
  }

  // Okay, we can do the transformation: create the new PHI node.
  PHINode *NewPN = PHINode::Create(I.getType(), "");
  NewPN->reserveOperandSpace(PN->getNumOperands()/2);
  InsertNewInstBefore(NewPN, *PN);
  NewPN->takeName(PN);

  // If we are going to have to insert a new computation, do so right before the
  // predecessor's terminator.
  if (NonConstBB)
    Builder->SetInsertPoint(NonConstBB->getTerminator());

  // Next, add all of the operands to the PHI.
  if (SelectInst *SI = dyn_cast<SelectInst>(&I)) {
    // We only currently try to fold the condition of a select when it is a phi,
    // not the true/false values.
    Value *TrueV = SI->getTrueValue();
    Value *FalseV = SI->getFalseValue();
    BasicBlock *PhiTransBB = PN->getParent();
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      BasicBlock *ThisBB = PN->getIncomingBlock(i);
      Value *TrueVInPred = TrueV->DoPHITranslation(PhiTransBB, ThisBB);
      Value *FalseVInPred = FalseV->DoPHITranslation(PhiTransBB, ThisBB);
      Value *InV = 0;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = InC->isNullValue() ?
FalseVInPred : TrueVInPred; 596 else 597 InV = Builder->CreateSelect(PN->getIncomingValue(i), 598 TrueVInPred, FalseVInPred, "phitmp"); 599 NewPN->addIncoming(InV, ThisBB); 600 } 601 } else if (CmpInst *CI = dyn_cast<CmpInst>(&I)) { 602 Constant *C = cast<Constant>(I.getOperand(1)); 603 for (unsigned i = 0; i != NumPHIValues; ++i) { 604 Value *InV = 0; 605 if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) 606 InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C); 607 else if (isa<ICmpInst>(CI)) 608 InV = Builder->CreateICmp(CI->getPredicate(), PN->getIncomingValue(i), 609 C, "phitmp"); 610 else 611 InV = Builder->CreateFCmp(CI->getPredicate(), PN->getIncomingValue(i), 612 C, "phitmp"); 613 NewPN->addIncoming(InV, PN->getIncomingBlock(i)); 614 } 615 } else if (I.getNumOperands() == 2) { 616 Constant *C = cast<Constant>(I.getOperand(1)); 617 for (unsigned i = 0; i != NumPHIValues; ++i) { 618 Value *InV = 0; 619 if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) 620 InV = ConstantExpr::get(I.getOpcode(), InC, C); 621 else 622 InV = Builder->CreateBinOp(cast<BinaryOperator>(I).getOpcode(), 623 PN->getIncomingValue(i), C, "phitmp"); 624 NewPN->addIncoming(InV, PN->getIncomingBlock(i)); 625 } 626 } else { 627 CastInst *CI = cast<CastInst>(&I); 628 const Type *RetTy = CI->getType(); 629 for (unsigned i = 0; i != NumPHIValues; ++i) { 630 Value *InV; 631 if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) 632 InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy); 633 else 634 InV = Builder->CreateCast(CI->getOpcode(), 635 PN->getIncomingValue(i), I.getType(), "phitmp"); 636 NewPN->addIncoming(InV, PN->getIncomingBlock(i)); 637 } 638 } 639 640 for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end(); 641 UI != E; ) { 642 Instruction *User = cast<Instruction>(*UI++); 643 if (User == &I) continue; 644 ReplaceInstUsesWith(*User, NewPN); 645 EraseInstFromFunction(*User); 646 } 647 return ReplaceInstUsesWith(I, NewPN); 648} 649 650/// FindElementAtOffset - Given a type and a constant offset, determine whether 651/// or not there is a sequence of GEP indices into the type that will land us at 652/// the specified offset. If so, fill them into NewIndices and return the 653/// resultant element type, otherwise return null. 654const Type *InstCombiner::FindElementAtOffset(const Type *Ty, int64_t Offset, 655 SmallVectorImpl<Value*> &NewIndices) { 656 if (!TD) return 0; 657 if (!Ty->isSized()) return 0; 658 659 // Start with the index over the outer type. Note that the type size 660 // might be zero (even if the offset isn't zero) if the indexed type 661 // is something like [0 x {int, int}] 662 const Type *IntPtrTy = TD->getIntPtrType(Ty->getContext()); 663 int64_t FirstIdx = 0; 664 if (int64_t TySize = TD->getTypeAllocSize(Ty)) { 665 FirstIdx = Offset/TySize; 666 Offset -= FirstIdx*TySize; 667 668 // Handle hosts where % returns negative instead of values [0..TySize). 669 if (Offset < 0) { 670 --FirstIdx; 671 Offset += TySize; 672 assert(Offset >= 0); 673 } 674 assert((uint64_t)Offset < (uint64_t)TySize && "Out of range offset"); 675 } 676 677 NewIndices.push_back(ConstantInt::get(IntPtrTy, FirstIdx)); 678 679 // Index into the types. If we fail, set OrigBase to null. 680 while (Offset) { 681 // Indexing into tail padding between struct/array elements. 
682 if (uint64_t(Offset*8) >= TD->getTypeSizeInBits(Ty)) 683 return 0; 684 685 if (const StructType *STy = dyn_cast<StructType>(Ty)) { 686 const StructLayout *SL = TD->getStructLayout(STy); 687 assert(Offset < (int64_t)SL->getSizeInBytes() && 688 "Offset must stay within the indexed type"); 689 690 unsigned Elt = SL->getElementContainingOffset(Offset); 691 NewIndices.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()), 692 Elt)); 693 694 Offset -= SL->getElementOffset(Elt); 695 Ty = STy->getElementType(Elt); 696 } else if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) { 697 uint64_t EltSize = TD->getTypeAllocSize(AT->getElementType()); 698 assert(EltSize && "Cannot index into a zero-sized array"); 699 NewIndices.push_back(ConstantInt::get(IntPtrTy,Offset/EltSize)); 700 Offset %= EltSize; 701 Ty = AT->getElementType(); 702 } else { 703 // Otherwise, we can't index into the middle of this atomic type, bail. 704 return 0; 705 } 706 } 707 708 return Ty; 709} 710 711 712 713Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { 714 SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end()); 715 716 if (Value *V = SimplifyGEPInst(&Ops[0], Ops.size(), TD)) 717 return ReplaceInstUsesWith(GEP, V); 718 719 Value *PtrOp = GEP.getOperand(0); 720 721 // Eliminate unneeded casts for indices, and replace indices which displace 722 // by multiples of a zero size type with zero. 723 if (TD) { 724 bool MadeChange = false; 725 const Type *IntPtrTy = TD->getIntPtrType(GEP.getContext()); 726 727 gep_type_iterator GTI = gep_type_begin(GEP); 728 for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); 729 I != E; ++I, ++GTI) { 730 // Skip indices into struct types. 731 const SequentialType *SeqTy = dyn_cast<SequentialType>(*GTI); 732 if (!SeqTy) continue; 733 734 // If the element type has zero size then any index over it is equivalent 735 // to an index of zero, so replace it with zero if it is not zero already. 736 if (SeqTy->getElementType()->isSized() && 737 TD->getTypeAllocSize(SeqTy->getElementType()) == 0) 738 if (!isa<Constant>(*I) || !cast<Constant>(*I)->isNullValue()) { 739 *I = Constant::getNullValue(IntPtrTy); 740 MadeChange = true; 741 } 742 743 if ((*I)->getType() != IntPtrTy) { 744 // If we are using a wider index than needed for this platform, shrink 745 // it to what we need. If narrower, sign-extend it to what we need. 746 // This explicit cast can make subsequent optimizations more obvious. 747 *I = Builder->CreateIntCast(*I, IntPtrTy, true); 748 MadeChange = true; 749 } 750 } 751 if (MadeChange) return &GEP; 752 } 753 754 // Combine Indices - If the source pointer to this getelementptr instruction 755 // is a getelementptr instruction, combine the indices of the two 756 // getelementptr instructions into a single instruction. 757 // 758 if (GEPOperator *Src = dyn_cast<GEPOperator>(PtrOp)) { 759 // Note that if our source is a gep chain itself that we wait for that 760 // chain to be resolved before we perform this transformation. This 761 // avoids us creating a TON of code in some cases. 762 // 763 if (GetElementPtrInst *SrcGEP = 764 dyn_cast<GetElementPtrInst>(Src->getOperand(0))) 765 if (SrcGEP->getNumOperands() == 2) 766 return 0; // Wait until our source is folded to completion. 767 768 SmallVector<Value*, 8> Indices; 769 770 // Find out whether the last index in the source GEP is a sequential idx. 
771 bool EndsWithSequential = false; 772 for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src); 773 I != E; ++I) 774 EndsWithSequential = !(*I)->isStructTy(); 775 776 // Can we combine the two pointer arithmetics offsets? 777 if (EndsWithSequential) { 778 // Replace: gep (gep %P, long B), long A, ... 779 // With: T = long A+B; gep %P, T, ... 780 // 781 Value *Sum; 782 Value *SO1 = Src->getOperand(Src->getNumOperands()-1); 783 Value *GO1 = GEP.getOperand(1); 784 if (SO1 == Constant::getNullValue(SO1->getType())) { 785 Sum = GO1; 786 } else if (GO1 == Constant::getNullValue(GO1->getType())) { 787 Sum = SO1; 788 } else { 789 // If they aren't the same type, then the input hasn't been processed 790 // by the loop above yet (which canonicalizes sequential index types to 791 // intptr_t). Just avoid transforming this until the input has been 792 // normalized. 793 if (SO1->getType() != GO1->getType()) 794 return 0; 795 Sum = Builder->CreateAdd(SO1, GO1, PtrOp->getName()+".sum"); 796 } 797 798 // Update the GEP in place if possible. 799 if (Src->getNumOperands() == 2) { 800 GEP.setOperand(0, Src->getOperand(0)); 801 GEP.setOperand(1, Sum); 802 return &GEP; 803 } 804 Indices.append(Src->op_begin()+1, Src->op_end()-1); 805 Indices.push_back(Sum); 806 Indices.append(GEP.op_begin()+2, GEP.op_end()); 807 } else if (isa<Constant>(*GEP.idx_begin()) && 808 cast<Constant>(*GEP.idx_begin())->isNullValue() && 809 Src->getNumOperands() != 1) { 810 // Otherwise we can do the fold if the first index of the GEP is a zero 811 Indices.append(Src->op_begin()+1, Src->op_end()); 812 Indices.append(GEP.idx_begin()+1, GEP.idx_end()); 813 } 814 815 if (!Indices.empty()) 816 return (GEP.isInBounds() && Src->isInBounds()) ? 817 GetElementPtrInst::CreateInBounds(Src->getOperand(0), Indices.begin(), 818 Indices.end(), GEP.getName()) : 819 GetElementPtrInst::Create(Src->getOperand(0), Indices.begin(), 820 Indices.end(), GEP.getName()); 821 } 822 823 // Handle gep(bitcast x) and gep(gep x, 0, 0, 0). 824 Value *StrippedPtr = PtrOp->stripPointerCasts(); 825 if (StrippedPtr != PtrOp) { 826 const PointerType *StrippedPtrTy =cast<PointerType>(StrippedPtr->getType()); 827 828 bool HasZeroPointerIndex = false; 829 if (ConstantInt *C = dyn_cast<ConstantInt>(GEP.getOperand(1))) 830 HasZeroPointerIndex = C->isZero(); 831 832 // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... 833 // into : GEP [10 x i8]* X, i32 0, ... 834 // 835 // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ... 836 // into : GEP i8* X, ... 837 // 838 // This occurs when the program declares an array extern like "int X[];" 839 if (HasZeroPointerIndex) { 840 const PointerType *CPTy = cast<PointerType>(PtrOp->getType()); 841 if (const ArrayType *CATy = 842 dyn_cast<ArrayType>(CPTy->getElementType())) { 843 // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ? 844 if (CATy->getElementType() == StrippedPtrTy->getElementType()) { 845 // -> GEP i8* X, ... 846 SmallVector<Value*, 8> Idx(GEP.idx_begin()+1, GEP.idx_end()); 847 GetElementPtrInst *Res = 848 GetElementPtrInst::Create(StrippedPtr, Idx.begin(), 849 Idx.end(), GEP.getName()); 850 Res->setIsInBounds(GEP.isInBounds()); 851 return Res; 852 } 853 854 if (const ArrayType *XATy = 855 dyn_cast<ArrayType>(StrippedPtrTy->getElementType())){ 856 // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ? 857 if (CATy->getElementType() == XATy->getElementType()) { 858 // -> GEP [10 x i8]* X, i32 0, ... 
859 // At this point, we know that the cast source type is a pointer 860 // to an array of the same type as the destination pointer 861 // array. Because the array type is never stepped over (there 862 // is a leading zero) we can fold the cast into this GEP. 863 GEP.setOperand(0, StrippedPtr); 864 return &GEP; 865 } 866 } 867 } 868 } else if (GEP.getNumOperands() == 2) { 869 // Transform things like: 870 // %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V 871 // into: %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast 872 const Type *SrcElTy = StrippedPtrTy->getElementType(); 873 const Type *ResElTy=cast<PointerType>(PtrOp->getType())->getElementType(); 874 if (TD && SrcElTy->isArrayTy() && 875 TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType()) == 876 TD->getTypeAllocSize(ResElTy)) { 877 Value *Idx[2]; 878 Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP.getContext())); 879 Idx[1] = GEP.getOperand(1); 880 Value *NewGEP = GEP.isInBounds() ? 881 Builder->CreateInBoundsGEP(StrippedPtr, Idx, Idx + 2, GEP.getName()) : 882 Builder->CreateGEP(StrippedPtr, Idx, Idx + 2, GEP.getName()); 883 // V and GEP are both pointer types --> BitCast 884 return new BitCastInst(NewGEP, GEP.getType()); 885 } 886 887 // Transform things like: 888 // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp 889 // (where tmp = 8*tmp2) into: 890 // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast 891 892 if (TD && SrcElTy->isArrayTy() && ResElTy->isIntegerTy(8)) { 893 uint64_t ArrayEltSize = 894 TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType()); 895 896 // Check to see if "tmp" is a scale by a multiple of ArrayEltSize. We 897 // allow either a mul, shift, or constant here. 898 Value *NewIdx = 0; 899 ConstantInt *Scale = 0; 900 if (ArrayEltSize == 1) { 901 NewIdx = GEP.getOperand(1); 902 Scale = ConstantInt::get(cast<IntegerType>(NewIdx->getType()), 1); 903 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP.getOperand(1))) { 904 NewIdx = ConstantInt::get(CI->getType(), 1); 905 Scale = CI; 906 } else if (Instruction *Inst =dyn_cast<Instruction>(GEP.getOperand(1))){ 907 if (Inst->getOpcode() == Instruction::Shl && 908 isa<ConstantInt>(Inst->getOperand(1))) { 909 ConstantInt *ShAmt = cast<ConstantInt>(Inst->getOperand(1)); 910 uint32_t ShAmtVal = ShAmt->getLimitedValue(64); 911 Scale = ConstantInt::get(cast<IntegerType>(Inst->getType()), 912 1ULL << ShAmtVal); 913 NewIdx = Inst->getOperand(0); 914 } else if (Inst->getOpcode() == Instruction::Mul && 915 isa<ConstantInt>(Inst->getOperand(1))) { 916 Scale = cast<ConstantInt>(Inst->getOperand(1)); 917 NewIdx = Inst->getOperand(0); 918 } 919 } 920 921 // If the index will be to exactly the right offset with the scale taken 922 // out, perform the transformation. Note, we don't know whether Scale is 923 // signed or not. We'll use unsigned version of division/modulo 924 // operation after making sure Scale doesn't have the sign bit set. 925 if (ArrayEltSize && Scale && Scale->getSExtValue() >= 0LL && 926 Scale->getZExtValue() % ArrayEltSize == 0) { 927 Scale = ConstantInt::get(Scale->getType(), 928 Scale->getZExtValue() / ArrayEltSize); 929 if (Scale->getZExtValue() != 1) { 930 Constant *C = ConstantExpr::getIntegerCast(Scale, NewIdx->getType(), 931 false /*ZExt*/); 932 NewIdx = Builder->CreateMul(NewIdx, C, "idxscale"); 933 } 934 935 // Insert the new GEP instruction. 
936 Value *Idx[2]; 937 Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP.getContext())); 938 Idx[1] = NewIdx; 939 Value *NewGEP = GEP.isInBounds() ? 940 Builder->CreateInBoundsGEP(StrippedPtr, Idx, Idx + 2,GEP.getName()): 941 Builder->CreateGEP(StrippedPtr, Idx, Idx + 2, GEP.getName()); 942 // The NewGEP must be pointer typed, so must the old one -> BitCast 943 return new BitCastInst(NewGEP, GEP.getType()); 944 } 945 } 946 } 947 } 948 949 /// See if we can simplify: 950 /// X = bitcast A* to B* 951 /// Y = gep X, <...constant indices...> 952 /// into a gep of the original struct. This is important for SROA and alias 953 /// analysis of unions. If "A" is also a bitcast, wait for A/X to be merged. 954 if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) { 955 if (TD && 956 !isa<BitCastInst>(BCI->getOperand(0)) && GEP.hasAllConstantIndices()) { 957 // Determine how much the GEP moves the pointer. We are guaranteed to get 958 // a constant back from EmitGEPOffset. 959 ConstantInt *OffsetV = cast<ConstantInt>(EmitGEPOffset(&GEP)); 960 int64_t Offset = OffsetV->getSExtValue(); 961 962 // If this GEP instruction doesn't move the pointer, just replace the GEP 963 // with a bitcast of the real input to the dest type. 964 if (Offset == 0) { 965 // If the bitcast is of an allocation, and the allocation will be 966 // converted to match the type of the cast, don't touch this. 967 if (isa<AllocaInst>(BCI->getOperand(0)) || 968 isMalloc(BCI->getOperand(0))) { 969 // See if the bitcast simplifies, if so, don't nuke this GEP yet. 970 if (Instruction *I = visitBitCast(*BCI)) { 971 if (I != BCI) { 972 I->takeName(BCI); 973 BCI->getParent()->getInstList().insert(BCI, I); 974 ReplaceInstUsesWith(*BCI, I); 975 } 976 return &GEP; 977 } 978 } 979 return new BitCastInst(BCI->getOperand(0), GEP.getType()); 980 } 981 982 // Otherwise, if the offset is non-zero, we need to find out if there is a 983 // field at Offset in 'A's type. If so, we can pull the cast through the 984 // GEP. 985 SmallVector<Value*, 8> NewIndices; 986 const Type *InTy = 987 cast<PointerType>(BCI->getOperand(0)->getType())->getElementType(); 988 if (FindElementAtOffset(InTy, Offset, NewIndices)) { 989 Value *NGEP = GEP.isInBounds() ? 990 Builder->CreateInBoundsGEP(BCI->getOperand(0), NewIndices.begin(), 991 NewIndices.end()) : 992 Builder->CreateGEP(BCI->getOperand(0), NewIndices.begin(), 993 NewIndices.end()); 994 995 if (NGEP->getType() == GEP.getType()) 996 return ReplaceInstUsesWith(GEP, NGEP); 997 NGEP->takeName(&GEP); 998 return new BitCastInst(NGEP, GEP.getType()); 999 } 1000 } 1001 } 1002 1003 return 0; 1004} 1005 1006 1007 1008static bool IsOnlyNullComparedAndFreed(const Value &V) { 1009 for (Value::const_use_iterator UI = V.use_begin(), UE = V.use_end(); 1010 UI != UE; ++UI) { 1011 const User *U = *UI; 1012 if (isFreeCall(U)) 1013 continue; 1014 if (const ICmpInst *ICI = dyn_cast<ICmpInst>(U)) 1015 if (ICI->isEquality() && isa<ConstantPointerNull>(ICI->getOperand(1))) 1016 continue; 1017 return false; 1018 } 1019 return true; 1020} 1021 1022Instruction *InstCombiner::visitMalloc(Instruction &MI) { 1023 // If we have a malloc call which is only used in any amount of comparisons 1024 // to null and free calls, delete the calls and replace the comparisons with 1025 // true or false as appropriate. 
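  // Illustrative example (not part of the original source):
  //   %m = call i8* @malloc(i64 4)
  //   %c = icmp eq i8* %m, null
  //   call void @free(i8* %m)
  // Here the free and the malloc are erased and %c is replaced by 'false'
  // (an 'icmp ne' against null would be replaced by 'true').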
  if (IsOnlyNullComparedAndFreed(MI)) {
    for (Value::use_iterator UI = MI.use_begin(), UE = MI.use_end();
         UI != UE;) {
      // We can assume that every remaining use is a free call or an icmp eq/ne
      // to null, so the cast is safe.
      Instruction *I = cast<Instruction>(*UI);

      // Early increment here, as we're about to get rid of the user.
      ++UI;

      if (isFreeCall(I)) {
        EraseInstFromFunction(*cast<CallInst>(I));
        continue;
      }
      // Again, the cast is safe.
      ICmpInst *C = cast<ICmpInst>(I);
      ReplaceInstUsesWith(*C, ConstantInt::get(Type::getInt1Ty(C->getContext()),
                                               C->isFalseWhenEqual()));
      EraseInstFromFunction(*C);
    }
    return EraseInstFromFunction(MI);
  }
  return 0;
}



Instruction *InstCombiner::visitFree(CallInst &FI) {
  Value *Op = FI.getArgOperand(0);

  // free undef -> unreachable.
  if (isa<UndefValue>(Op)) {
    // We cannot modify the CFG here, so instead mark this point as unreachable
    // by inserting a store of 'true' through an undef pointer.
    new StoreInst(ConstantInt::getTrue(FI.getContext()),
                  UndefValue::get(Type::getInt1PtrTy(FI.getContext())), &FI);
    return EraseInstFromFunction(FI);
  }

  // If we have 'free null' delete the instruction.  This can happen in STL code
  // when lots of inlining happens.
  if (isa<ConstantPointerNull>(Op))
    return EraseInstFromFunction(FI);

  return 0;
}



Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
  // Change br (not X), label True, label False to: br X, label False, True
  Value *X = 0;
  BasicBlock *TrueDest;
  BasicBlock *FalseDest;
  if (match(&BI, m_Br(m_Not(m_Value(X)), TrueDest, FalseDest)) &&
      !isa<Constant>(X)) {
    // Swap Destinations and condition...
    BI.setCondition(X);
    BI.setSuccessor(0, FalseDest);
    BI.setSuccessor(1, TrueDest);
    return &BI;
  }

  // Canonicalize fcmp_one -> fcmp_oeq
  FCmpInst::Predicate FPred; Value *Y;
  if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)),
                      TrueDest, FalseDest)) &&
      BI.getCondition()->hasOneUse())
    if (FPred == FCmpInst::FCMP_ONE || FPred == FCmpInst::FCMP_OLE ||
        FPred == FCmpInst::FCMP_OGE) {
      FCmpInst *Cond = cast<FCmpInst>(BI.getCondition());
      Cond->setPredicate(FCmpInst::getInversePredicate(FPred));

      // Swap Destinations and condition.
      BI.setSuccessor(0, FalseDest);
      BI.setSuccessor(1, TrueDest);
      Worklist.Add(Cond);
      return &BI;
    }

  // Canonicalize icmp_ne -> icmp_eq
  ICmpInst::Predicate IPred;
  if (match(&BI, m_Br(m_ICmp(IPred, m_Value(X), m_Value(Y)),
                      TrueDest, FalseDest)) &&
      BI.getCondition()->hasOneUse())
    if (IPred == ICmpInst::ICMP_NE || IPred == ICmpInst::ICMP_ULE ||
        IPred == ICmpInst::ICMP_SLE || IPred == ICmpInst::ICMP_UGE ||
        IPred == ICmpInst::ICMP_SGE) {
      ICmpInst *Cond = cast<ICmpInst>(BI.getCondition());
      Cond->setPredicate(ICmpInst::getInversePredicate(IPred));
      // Swap Destinations and condition.
1116 BI.setSuccessor(0, FalseDest); 1117 BI.setSuccessor(1, TrueDest); 1118 Worklist.Add(Cond); 1119 return &BI; 1120 } 1121 1122 return 0; 1123} 1124 1125Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) { 1126 Value *Cond = SI.getCondition(); 1127 if (Instruction *I = dyn_cast<Instruction>(Cond)) { 1128 if (I->getOpcode() == Instruction::Add) 1129 if (ConstantInt *AddRHS = dyn_cast<ConstantInt>(I->getOperand(1))) { 1130 // change 'switch (X+4) case 1:' into 'switch (X) case -3' 1131 for (unsigned i = 2, e = SI.getNumOperands(); i != e; i += 2) 1132 SI.setOperand(i, 1133 ConstantExpr::getSub(cast<Constant>(SI.getOperand(i)), 1134 AddRHS)); 1135 SI.setOperand(0, I->getOperand(0)); 1136 Worklist.Add(I); 1137 return &SI; 1138 } 1139 } 1140 return 0; 1141} 1142 1143Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) { 1144 Value *Agg = EV.getAggregateOperand(); 1145 1146 if (!EV.hasIndices()) 1147 return ReplaceInstUsesWith(EV, Agg); 1148 1149 if (Constant *C = dyn_cast<Constant>(Agg)) { 1150 if (isa<UndefValue>(C)) 1151 return ReplaceInstUsesWith(EV, UndefValue::get(EV.getType())); 1152 1153 if (isa<ConstantAggregateZero>(C)) 1154 return ReplaceInstUsesWith(EV, Constant::getNullValue(EV.getType())); 1155 1156 if (isa<ConstantArray>(C) || isa<ConstantStruct>(C)) { 1157 // Extract the element indexed by the first index out of the constant 1158 Value *V = C->getOperand(*EV.idx_begin()); 1159 if (EV.getNumIndices() > 1) 1160 // Extract the remaining indices out of the constant indexed by the 1161 // first index 1162 return ExtractValueInst::Create(V, EV.idx_begin() + 1, EV.idx_end()); 1163 else 1164 return ReplaceInstUsesWith(EV, V); 1165 } 1166 return 0; // Can't handle other constants 1167 } 1168 if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) { 1169 // We're extracting from an insertvalue instruction, compare the indices 1170 const unsigned *exti, *exte, *insi, *inse; 1171 for (exti = EV.idx_begin(), insi = IV->idx_begin(), 1172 exte = EV.idx_end(), inse = IV->idx_end(); 1173 exti != exte && insi != inse; 1174 ++exti, ++insi) { 1175 if (*insi != *exti) 1176 // The insert and extract both reference distinctly different elements. 1177 // This means the extract is not influenced by the insert, and we can 1178 // replace the aggregate operand of the extract with the aggregate 1179 // operand of the insert. i.e., replace 1180 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1 1181 // %E = extractvalue { i32, { i32 } } %I, 0 1182 // with 1183 // %E = extractvalue { i32, { i32 } } %A, 0 1184 return ExtractValueInst::Create(IV->getAggregateOperand(), 1185 EV.idx_begin(), EV.idx_end()); 1186 } 1187 if (exti == exte && insi == inse) 1188 // Both iterators are at the end: Index lists are identical. Replace 1189 // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0 1190 // %C = extractvalue { i32, { i32 } } %B, 1, 0 1191 // with "i32 42" 1192 return ReplaceInstUsesWith(EV, IV->getInsertedValueOperand()); 1193 if (exti == exte) { 1194 // The extract list is a prefix of the insert list. i.e. replace 1195 // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0 1196 // %E = extractvalue { i32, { i32 } } %I, 1 1197 // with 1198 // %X = extractvalue { i32, { i32 } } %A, 1 1199 // %E = insertvalue { i32 } %X, i32 42, 0 1200 // by switching the order of the insert and extract (though the 1201 // insertvalue should be left in, since it may have other uses). 
1202 Value *NewEV = Builder->CreateExtractValue(IV->getAggregateOperand(), 1203 EV.idx_begin(), EV.idx_end()); 1204 return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(), 1205 insi, inse); 1206 } 1207 if (insi == inse) 1208 // The insert list is a prefix of the extract list 1209 // We can simply remove the common indices from the extract and make it 1210 // operate on the inserted value instead of the insertvalue result. 1211 // i.e., replace 1212 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1 1213 // %E = extractvalue { i32, { i32 } } %I, 1, 0 1214 // with 1215 // %E extractvalue { i32 } { i32 42 }, 0 1216 return ExtractValueInst::Create(IV->getInsertedValueOperand(), 1217 exti, exte); 1218 } 1219 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Agg)) { 1220 // We're extracting from an intrinsic, see if we're the only user, which 1221 // allows us to simplify multiple result intrinsics to simpler things that 1222 // just get one value. 1223 if (II->hasOneUse()) { 1224 // Check if we're grabbing the overflow bit or the result of a 'with 1225 // overflow' intrinsic. If it's the latter we can remove the intrinsic 1226 // and replace it with a traditional binary instruction. 1227 switch (II->getIntrinsicID()) { 1228 case Intrinsic::uadd_with_overflow: 1229 case Intrinsic::sadd_with_overflow: 1230 if (*EV.idx_begin() == 0) { // Normal result. 1231 Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1); 1232 II->replaceAllUsesWith(UndefValue::get(II->getType())); 1233 EraseInstFromFunction(*II); 1234 return BinaryOperator::CreateAdd(LHS, RHS); 1235 } 1236 1237 // If the normal result of the add is dead, and the RHS is a constant, 1238 // we can transform this into a range comparison. 1239 // overflow = uadd a, -4 --> overflow = icmp ugt a, 3 1240 if (II->getIntrinsicID() == Intrinsic::uadd_with_overflow) 1241 if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getArgOperand(1))) 1242 return new ICmpInst(ICmpInst::ICMP_UGT, II->getArgOperand(0), 1243 ConstantExpr::getNot(CI)); 1244 break; 1245 case Intrinsic::usub_with_overflow: 1246 case Intrinsic::ssub_with_overflow: 1247 if (*EV.idx_begin() == 0) { // Normal result. 1248 Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1); 1249 II->replaceAllUsesWith(UndefValue::get(II->getType())); 1250 EraseInstFromFunction(*II); 1251 return BinaryOperator::CreateSub(LHS, RHS); 1252 } 1253 break; 1254 case Intrinsic::umul_with_overflow: 1255 case Intrinsic::smul_with_overflow: 1256 if (*EV.idx_begin() == 0) { // Normal result. 1257 Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1); 1258 II->replaceAllUsesWith(UndefValue::get(II->getType())); 1259 EraseInstFromFunction(*II); 1260 return BinaryOperator::CreateMul(LHS, RHS); 1261 } 1262 break; 1263 default: 1264 break; 1265 } 1266 } 1267 } 1268 if (LoadInst *L = dyn_cast<LoadInst>(Agg)) 1269 // If the (non-volatile) load only has one use, we can rewrite this to a 1270 // load from a GEP. This reduces the size of the load. 1271 // FIXME: If a load is used only by extractvalue instructions then this 1272 // could be done regardless of having multiple uses. 1273 if (!L->isVolatile() && L->hasOneUse()) { 1274 // extractvalue has integer indices, getelementptr has Value*s. Convert. 1275 SmallVector<Value*, 4> Indices; 1276 // Prefix an i32 0 since we need the first element. 
1277 Indices.push_back(Builder->getInt32(0)); 1278 for (ExtractValueInst::idx_iterator I = EV.idx_begin(), E = EV.idx_end(); 1279 I != E; ++I) 1280 Indices.push_back(Builder->getInt32(*I)); 1281 1282 // We need to insert these at the location of the old load, not at that of 1283 // the extractvalue. 1284 Builder->SetInsertPoint(L->getParent(), L); 1285 Value *GEP = Builder->CreateInBoundsGEP(L->getPointerOperand(), 1286 Indices.begin(), Indices.end()); 1287 // Returning the load directly will cause the main loop to insert it in 1288 // the wrong spot, so use ReplaceInstUsesWith(). 1289 return ReplaceInstUsesWith(EV, Builder->CreateLoad(GEP)); 1290 } 1291 // We could simplify extracts from other values. Note that nested extracts may 1292 // already be simplified implicitly by the above: extract (extract (insert) ) 1293 // will be translated into extract ( insert ( extract ) ) first and then just 1294 // the value inserted, if appropriate. Similarly for extracts from single-use 1295 // loads: extract (extract (load)) will be translated to extract (load (gep)) 1296 // and if again single-use then via load (gep (gep)) to load (gep). 1297 // However, double extracts from e.g. function arguments or return values 1298 // aren't handled yet. 1299 return 0; 1300} 1301 1302 1303 1304 1305/// TryToSinkInstruction - Try to move the specified instruction from its 1306/// current block into the beginning of DestBlock, which can only happen if it's 1307/// safe to move the instruction past all of the instructions between it and the 1308/// end of its block. 1309static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) { 1310 assert(I->hasOneUse() && "Invariants didn't hold!"); 1311 1312 // Cannot move control-flow-involving, volatile loads, vaarg, etc. 1313 if (isa<PHINode>(I) || I->mayHaveSideEffects() || isa<TerminatorInst>(I)) 1314 return false; 1315 1316 // Do not sink alloca instructions out of the entry block. 1317 if (isa<AllocaInst>(I) && I->getParent() == 1318 &DestBlock->getParent()->getEntryBlock()) 1319 return false; 1320 1321 // We can only sink load instructions if there is nothing between the load and 1322 // the end of block that could change the value. 1323 if (I->mayReadFromMemory()) { 1324 for (BasicBlock::iterator Scan = I, E = I->getParent()->end(); 1325 Scan != E; ++Scan) 1326 if (Scan->mayWriteToMemory()) 1327 return false; 1328 } 1329 1330 BasicBlock::iterator InsertPos = DestBlock->getFirstNonPHI(); 1331 1332 I->moveBefore(InsertPos); 1333 ++NumSunkInst; 1334 return true; 1335} 1336 1337 1338/// AddReachableCodeToWorklist - Walk the function in depth-first order, adding 1339/// all reachable code to the worklist. 1340/// 1341/// This has a couple of tricks to make the code faster and more powerful. In 1342/// particular, we constant fold and DCE instructions as we go, to avoid adding 1343/// them to the worklist (this significantly speeds up instcombine on code where 1344/// many instructions are dead or constant). Additionally, if we find a branch 1345/// whose condition is a known constant, we only visit the reachable successors. 
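/// For example (illustrative, not from the original comment), a block ending
/// in "br i1 true, label %live, label %dead" only has %live added to the
/// block worklist below; %dead is never scanned here unless it is reachable
/// along some other edge.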
1346/// 1347static bool AddReachableCodeToWorklist(BasicBlock *BB, 1348 SmallPtrSet<BasicBlock*, 64> &Visited, 1349 InstCombiner &IC, 1350 const TargetData *TD) { 1351 bool MadeIRChange = false; 1352 SmallVector<BasicBlock*, 256> Worklist; 1353 Worklist.push_back(BB); 1354 1355 SmallVector<Instruction*, 128> InstrsForInstCombineWorklist; 1356 SmallPtrSet<ConstantExpr*, 64> FoldedConstants; 1357 1358 do { 1359 BB = Worklist.pop_back_val(); 1360 1361 // We have now visited this block! If we've already been here, ignore it. 1362 if (!Visited.insert(BB)) continue; 1363 1364 for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) { 1365 Instruction *Inst = BBI++; 1366 1367 // DCE instruction if trivially dead. 1368 if (isInstructionTriviallyDead(Inst)) { 1369 ++NumDeadInst; 1370 DEBUG(errs() << "IC: DCE: " << *Inst << '\n'); 1371 Inst->eraseFromParent(); 1372 continue; 1373 } 1374 1375 // ConstantProp instruction if trivially constant. 1376 if (!Inst->use_empty() && isa<Constant>(Inst->getOperand(0))) 1377 if (Constant *C = ConstantFoldInstruction(Inst, TD)) { 1378 DEBUG(errs() << "IC: ConstFold to: " << *C << " from: " 1379 << *Inst << '\n'); 1380 Inst->replaceAllUsesWith(C); 1381 ++NumConstProp; 1382 Inst->eraseFromParent(); 1383 continue; 1384 } 1385 1386 if (TD) { 1387 // See if we can constant fold its operands. 1388 for (User::op_iterator i = Inst->op_begin(), e = Inst->op_end(); 1389 i != e; ++i) { 1390 ConstantExpr *CE = dyn_cast<ConstantExpr>(i); 1391 if (CE == 0) continue; 1392 1393 // If we already folded this constant, don't try again. 1394 if (!FoldedConstants.insert(CE)) 1395 continue; 1396 1397 Constant *NewC = ConstantFoldConstantExpression(CE, TD); 1398 if (NewC && NewC != CE) { 1399 *i = NewC; 1400 MadeIRChange = true; 1401 } 1402 } 1403 } 1404 1405 InstrsForInstCombineWorklist.push_back(Inst); 1406 } 1407 1408 // Recursively visit successors. If this is a branch or switch on a 1409 // constant, only visit the reachable successor. 1410 TerminatorInst *TI = BB->getTerminator(); 1411 if (BranchInst *BI = dyn_cast<BranchInst>(TI)) { 1412 if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) { 1413 bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue(); 1414 BasicBlock *ReachableBB = BI->getSuccessor(!CondVal); 1415 Worklist.push_back(ReachableBB); 1416 continue; 1417 } 1418 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) { 1419 if (ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition())) { 1420 // See if this is an explicit destination. 1421 for (unsigned i = 1, e = SI->getNumSuccessors(); i != e; ++i) 1422 if (SI->getCaseValue(i) == Cond) { 1423 BasicBlock *ReachableBB = SI->getSuccessor(i); 1424 Worklist.push_back(ReachableBB); 1425 continue; 1426 } 1427 1428 // Otherwise it is the default destination. 1429 Worklist.push_back(SI->getSuccessor(0)); 1430 continue; 1431 } 1432 } 1433 1434 for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) 1435 Worklist.push_back(TI->getSuccessor(i)); 1436 } while (!Worklist.empty()); 1437 1438 // Once we've found all of the instructions to add to instcombine's worklist, 1439 // add them in reverse order. This way instcombine will visit from the top 1440 // of the function down. This jives well with the way that it adds all uses 1441 // of instructions to the worklist after doing a transformation, thus avoiding 1442 // some N^2 behavior in pathological cases. 
  IC.Worklist.AddInitialGroup(&InstrsForInstCombineWorklist[0],
                              InstrsForInstCombineWorklist.size());

  return MadeIRChange;
}

bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
  MadeIRChange = false;

  DEBUG(errs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
               << F.getNameStr() << "\n");

  {
    // Do a depth-first traversal of the function, populate the worklist with
    // the reachable instructions.  Ignore blocks that are not reachable.  Keep
    // track of which blocks we visit.
    SmallPtrSet<BasicBlock*, 64> Visited;
    MadeIRChange |= AddReachableCodeToWorklist(F.begin(), Visited, *this, TD);

    // Do a quick scan over the function.  If we find any blocks that are
    // unreachable, remove any instructions inside of them.  This prevents
    // the instcombine code from having to deal with some bad special cases.
    for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
      if (!Visited.count(BB)) {
        Instruction *Term = BB->getTerminator();
        while (Term != BB->begin()) {   // Remove instrs bottom-up
          BasicBlock::iterator I = Term; --I;

          DEBUG(errs() << "IC: DCE: " << *I << '\n');
          // A debug intrinsic shouldn't force another iteration if we weren't
          // going to do one without it.
          if (!isa<DbgInfoIntrinsic>(I)) {
            ++NumDeadInst;
            MadeIRChange = true;
          }

          // If I is not void type then replaceAllUsesWith undef.
          // This allows ValueHandlers and custom metadata to adjust themselves.
          if (!I->getType()->isVoidTy())
            I->replaceAllUsesWith(UndefValue::get(I->getType()));
          I->eraseFromParent();
        }
      }
  }

  while (!Worklist.isEmpty()) {
    Instruction *I = Worklist.RemoveOne();
    if (I == 0) continue;  // skip null values.

    // Check to see if we can DCE the instruction.
    if (isInstructionTriviallyDead(I)) {
      DEBUG(errs() << "IC: DCE: " << *I << '\n');
      EraseInstFromFunction(*I);
      ++NumDeadInst;
      MadeIRChange = true;
      continue;
    }

    // Instruction isn't dead, see if we can constant propagate it.
    if (!I->use_empty() && isa<Constant>(I->getOperand(0)))
      if (Constant *C = ConstantFoldInstruction(I, TD)) {
        DEBUG(errs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');

        // Add operands to the worklist.
        ReplaceInstUsesWith(*I, C);
        ++NumConstProp;
        EraseInstFromFunction(*I);
        MadeIRChange = true;
        continue;
      }

    // See if we can trivially sink this instruction to a successor basic block.
    if (I->hasOneUse()) {
      BasicBlock *BB = I->getParent();
      Instruction *UserInst = cast<Instruction>(I->use_back());
      BasicBlock *UserParent;

      // Get the block the use occurs in.
      if (PHINode *PN = dyn_cast<PHINode>(UserInst))
        UserParent = PN->getIncomingBlock(I->use_begin().getUse());
      else
        UserParent = UserInst->getParent();

      if (UserParent != BB) {
        bool UserIsSuccessor = false;
        // See if the user is one of our successors.
        for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI)
          if (*SI == UserParent) {
            UserIsSuccessor = true;
            break;
          }

        // If the user is one of our immediate successors, and if that successor
        // only has us as a predecessor (we'd have to split the critical edge
        // otherwise), we can keep going.
1538 if (UserIsSuccessor && UserParent->getSinglePredecessor()) 1539 // Okay, the CFG is simple enough, try to sink this instruction. 1540 MadeIRChange |= TryToSinkInstruction(I, UserParent); 1541 } 1542 } 1543 1544 // Now that we have an instruction, try combining it to simplify it. 1545 Builder->SetInsertPoint(I->getParent(), I); 1546 1547#ifndef NDEBUG 1548 std::string OrigI; 1549#endif 1550 DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str();); 1551 DEBUG(errs() << "IC: Visiting: " << OrigI << '\n'); 1552 1553 if (Instruction *Result = visit(*I)) { 1554 ++NumCombined; 1555 // Should we replace the old instruction with a new one? 1556 if (Result != I) { 1557 DEBUG(errs() << "IC: Old = " << *I << '\n' 1558 << " New = " << *Result << '\n'); 1559 1560 // Everything uses the new instruction now. 1561 I->replaceAllUsesWith(Result); 1562 1563 // Push the new instruction and any users onto the worklist. 1564 Worklist.Add(Result); 1565 Worklist.AddUsersToWorkList(*Result); 1566 1567 // Move the name to the new instruction first. 1568 Result->takeName(I); 1569 1570 // Insert the new instruction into the basic block... 1571 BasicBlock *InstParent = I->getParent(); 1572 BasicBlock::iterator InsertPos = I; 1573 1574 if (!isa<PHINode>(Result)) // If combining a PHI, don't insert 1575 while (isa<PHINode>(InsertPos)) // middle of a block of PHIs. 1576 ++InsertPos; 1577 1578 InstParent->getInstList().insert(InsertPos, Result); 1579 1580 EraseInstFromFunction(*I); 1581 } else { 1582#ifndef NDEBUG 1583 DEBUG(errs() << "IC: Mod = " << OrigI << '\n' 1584 << " New = " << *I << '\n'); 1585#endif 1586 1587 // If the instruction was modified, it's possible that it is now dead. 1588 // if so, remove it. 1589 if (isInstructionTriviallyDead(I)) { 1590 EraseInstFromFunction(*I); 1591 } else { 1592 Worklist.Add(I); 1593 Worklist.AddUsersToWorkList(*I); 1594 } 1595 } 1596 MadeIRChange = true; 1597 } 1598 } 1599 1600 Worklist.Zap(); 1601 return MadeIRChange; 1602} 1603 1604 1605bool InstCombiner::runOnFunction(Function &F) { 1606 MustPreserveLCSSA = mustPreserveAnalysisID(LCSSAID); 1607 TD = getAnalysisIfAvailable<TargetData>(); 1608 1609 1610 /// Builder - This is an IRBuilder that automatically inserts new 1611 /// instructions into the worklist when they are created. 1612 IRBuilder<true, TargetFolder, InstCombineIRInserter> 1613 TheBuilder(F.getContext(), TargetFolder(TD), 1614 InstCombineIRInserter(Worklist)); 1615 Builder = &TheBuilder; 1616 1617 bool EverMadeChange = false; 1618 1619 // Iterate while there is work to do. 1620 unsigned Iteration = 0; 1621 while (DoOneIteration(F, Iteration++)) 1622 EverMadeChange = true; 1623 1624 Builder = 0; 1625 return EverMadeChange; 1626} 1627 1628FunctionPass *llvm::createInstructionCombiningPass() { 1629 return new InstCombiner(); 1630} 1631
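// Illustrative usage note (not part of the original file): the combiner is
// normally scheduled through the standard pass manager, e.g.
//   PassManager PM;
//   PM.add(createInstructionCombiningPass());
//   PM.run(M);
// or invoked from the command line with "opt -instcombine input.bc".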