InstructionCombining.cpp revision 6eb6116d52a729b36ab9089e656267e09cc6207a
1//===- InstructionCombining.cpp - Combine multiple instructions -----------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// InstructionCombining - Combine instructions to form fewer, simple 11// instructions. This pass does not modify the CFG. This pass is where 12// algebraic simplification happens. 13// 14// This pass combines things like: 15// %Y = add i32 %X, 1 16// %Z = add i32 %Y, 1 17// into: 18// %Z = add i32 %X, 2 19// 20// This is a simple worklist driven algorithm. 21// 22// This pass guarantees that the following canonicalizations are performed on 23// the program: 24// 1. If a binary operator has a constant operand, it is moved to the RHS 25// 2. Bitwise operators with constant operands are always grouped so that 26// shifts are performed first, then or's, then and's, then xor's. 27// 3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible 28// 4. All cmp instructions on boolean values are replaced with logical ops 29// 5. add X, X is represented as (X*2) => (X << 1) 30// 6. Multiplies with a power-of-two constant argument are transformed into 31// shifts. 32// ... etc. 33// 34//===----------------------------------------------------------------------===// 35 36#define DEBUG_TYPE "instcombine" 37#include "llvm/Transforms/Scalar.h" 38#include "InstCombine.h" 39#include "llvm/IntrinsicInst.h" 40#include "llvm/Analysis/ConstantFolding.h" 41#include "llvm/Analysis/InstructionSimplify.h" 42#include "llvm/Analysis/MemoryBuiltins.h" 43#include "llvm/Target/TargetData.h" 44#include "llvm/Transforms/Utils/Local.h" 45#include "llvm/Support/CFG.h" 46#include "llvm/Support/Debug.h" 47#include "llvm/Support/GetElementPtrTypeIterator.h" 48#include "llvm/Support/PatternMatch.h" 49#include "llvm/ADT/SmallPtrSet.h" 50#include "llvm/ADT/Statistic.h" 51#include "llvm-c/Initialization.h" 52#include <algorithm> 53#include <climits> 54using namespace llvm; 55using namespace llvm::PatternMatch; 56 57STATISTIC(NumCombined , "Number of insts combined"); 58STATISTIC(NumConstProp, "Number of constant folds"); 59STATISTIC(NumDeadInst , "Number of dead inst eliminated"); 60STATISTIC(NumSunkInst , "Number of instructions sunk"); 61STATISTIC(NumExpand, "Number of expansions"); 62STATISTIC(NumFactor , "Number of factorizations"); 63STATISTIC(NumReassoc , "Number of reassociations"); 64 65// Initialization Routines 66void llvm::initializeInstCombine(PassRegistry &Registry) { 67 initializeInstCombinerPass(Registry); 68} 69 70void LLVMInitializeInstCombine(LLVMPassRegistryRef R) { 71 initializeInstCombine(*unwrap(R)); 72} 73 74char InstCombiner::ID = 0; 75INITIALIZE_PASS(InstCombiner, "instcombine", 76 "Combine redundant instructions", false, false) 77 78void InstCombiner::getAnalysisUsage(AnalysisUsage &AU) const { 79 AU.addPreservedID(LCSSAID); 80 AU.setPreservesCFG(); 81} 82 83 84/// ShouldChangeType - Return true if it is desirable to convert a computation 85/// from 'From' to 'To'. We don't want to convert from a legal to an illegal 86/// type for example, or from a smaller to a larger illegal type. 87bool InstCombiner::ShouldChangeType(const Type *From, const Type *To) const { 88 assert(From->isIntegerTy() && To->isIntegerTy()); 89 90 // If we don't have TD, we don't know if the source/dest are legal. 
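  // (When TD is available, the checks below mean that, for example, on a
  // target whose legal integer widths are i8/i16/i32/i64, i32 -> i13 is
  // rejected (legal type to illegal type), i160 -> i256 is rejected (both
  // illegal and the result grows), while i160 -> i64 and i32 -> i64 are
  // both acceptable.)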
91 if (!TD) return false; 92 93 unsigned FromWidth = From->getPrimitiveSizeInBits(); 94 unsigned ToWidth = To->getPrimitiveSizeInBits(); 95 bool FromLegal = TD->isLegalInteger(FromWidth); 96 bool ToLegal = TD->isLegalInteger(ToWidth); 97 98 // If this is a legal integer from type, and the result would be an illegal 99 // type, don't do the transformation. 100 if (FromLegal && !ToLegal) 101 return false; 102 103 // Otherwise, if both are illegal, do not increase the size of the result. We 104 // do allow things like i160 -> i64, but not i64 -> i160. 105 if (!FromLegal && !ToLegal && ToWidth > FromWidth) 106 return false; 107 108 return true; 109} 110 111 112/// SimplifyAssociativeOrCommutative - This performs a few simplifications for 113/// operators which are associative or commutative: 114// 115// Commutative operators: 116// 117// 1. Order operands such that they are listed from right (least complex) to 118// left (most complex). This puts constants before unary operators before 119// binary operators. 120// 121// Associative operators: 122// 123// 2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies. 124// 3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies. 125// 126// Associative and commutative operators: 127// 128// 4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies. 129// 5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies. 130// 6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)" 131// if C1 and C2 are constants. 132// 133bool InstCombiner::SimplifyAssociativeOrCommutative(BinaryOperator &I) { 134 Instruction::BinaryOps Opcode = I.getOpcode(); 135 bool Changed = false; 136 137 do { 138 // Order operands such that they are listed from right (least complex) to 139 // left (most complex). This puts constants before unary operators before 140 // binary operators. 141 if (I.isCommutative() && getComplexity(I.getOperand(0)) < 142 getComplexity(I.getOperand(1))) 143 Changed = !I.swapOperands(); 144 145 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0)); 146 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1)); 147 148 if (I.isAssociative()) { 149 // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies. 150 if (Op0 && Op0->getOpcode() == Opcode) { 151 Value *A = Op0->getOperand(0); 152 Value *B = Op0->getOperand(1); 153 Value *C = I.getOperand(1); 154 155 // Does "B op C" simplify? 156 if (Value *V = SimplifyBinOp(Opcode, B, C, TD)) { 157 // It simplifies to V. Form "A op V". 158 I.setOperand(0, A); 159 I.setOperand(1, V); 160 Changed = true; 161 ++NumReassoc; 162 continue; 163 } 164 } 165 166 // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies. 167 if (Op1 && Op1->getOpcode() == Opcode) { 168 Value *A = I.getOperand(0); 169 Value *B = Op1->getOperand(0); 170 Value *C = Op1->getOperand(1); 171 172 // Does "A op B" simplify? 173 if (Value *V = SimplifyBinOp(Opcode, A, B, TD)) { 174 // It simplifies to V. Form "V op C". 175 I.setOperand(0, V); 176 I.setOperand(1, C); 177 Changed = true; 178 ++NumReassoc; 179 continue; 180 } 181 } 182 } 183 184 if (I.isAssociative() && I.isCommutative()) { 185 // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies. 186 if (Op0 && Op0->getOpcode() == Opcode) { 187 Value *A = Op0->getOperand(0); 188 Value *B = Op0->getOperand(1); 189 Value *C = I.getOperand(1); 190 191 // Does "C op A" simplify? 192 if (Value *V = SimplifyBinOp(Opcode, C, A, TD)) { 193 // It simplifies to V. 
Form "V op B". 194 I.setOperand(0, V); 195 I.setOperand(1, B); 196 Changed = true; 197 ++NumReassoc; 198 continue; 199 } 200 } 201 202 // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies. 203 if (Op1 && Op1->getOpcode() == Opcode) { 204 Value *A = I.getOperand(0); 205 Value *B = Op1->getOperand(0); 206 Value *C = Op1->getOperand(1); 207 208 // Does "C op A" simplify? 209 if (Value *V = SimplifyBinOp(Opcode, C, A, TD)) { 210 // It simplifies to V. Form "B op V". 211 I.setOperand(0, B); 212 I.setOperand(1, V); 213 Changed = true; 214 ++NumReassoc; 215 continue; 216 } 217 } 218 219 // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)" 220 // if C1 and C2 are constants. 221 if (Op0 && Op1 && 222 Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode && 223 isa<Constant>(Op0->getOperand(1)) && 224 isa<Constant>(Op1->getOperand(1)) && 225 Op0->hasOneUse() && Op1->hasOneUse()) { 226 Value *A = Op0->getOperand(0); 227 Constant *C1 = cast<Constant>(Op0->getOperand(1)); 228 Value *B = Op1->getOperand(0); 229 Constant *C2 = cast<Constant>(Op1->getOperand(1)); 230 231 Constant *Folded = ConstantExpr::get(Opcode, C1, C2); 232 Instruction *New = BinaryOperator::Create(Opcode, A, B, Op1->getName(), 233 &I); 234 Worklist.Add(New); 235 I.setOperand(0, New); 236 I.setOperand(1, Folded); 237 Changed = true; 238 continue; 239 } 240 } 241 242 // No further simplifications. 243 return Changed; 244 } while (1); 245} 246 247/// LeftDistributesOverRight - Whether "X LOp (Y ROp Z)" is always equal to 248/// "(X LOp Y) ROp (X LOp Z)". 249static bool LeftDistributesOverRight(Instruction::BinaryOps LOp, 250 Instruction::BinaryOps ROp) { 251 switch (LOp) { 252 default: 253 return false; 254 255 case Instruction::And: 256 // And distributes over Or and Xor. 257 switch (ROp) { 258 default: 259 return false; 260 case Instruction::Or: 261 case Instruction::Xor: 262 return true; 263 } 264 265 case Instruction::Mul: 266 // Multiplication distributes over addition and subtraction. 267 switch (ROp) { 268 default: 269 return false; 270 case Instruction::Add: 271 case Instruction::Sub: 272 return true; 273 } 274 275 case Instruction::Or: 276 // Or distributes over And. 277 switch (ROp) { 278 default: 279 return false; 280 case Instruction::And: 281 return true; 282 } 283 } 284} 285 286/// RightDistributesOverLeft - Whether "(X LOp Y) ROp Z" is always equal to 287/// "(X ROp Z) LOp (Y ROp Z)". 288static bool RightDistributesOverLeft(Instruction::BinaryOps LOp, 289 Instruction::BinaryOps ROp) { 290 if (Instruction::isCommutative(ROp)) 291 return LeftDistributesOverRight(ROp, LOp); 292 // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z", 293 // but this requires knowing that the addition does not overflow and other 294 // such subtleties. 295 return false; 296} 297 298/// SimplifyUsingDistributiveLaws - This tries to simplify binary operations 299/// which some other binary operation distributes over either by factorizing 300/// out common terms (eg "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this 301/// results in simplifications (eg: "A & (B | C) -> (A&B) | (A&C)" if this is 302/// a win). Returns the simplified value, or null if it didn't simplify. 
303Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) { 304 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1); 305 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS); 306 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS); 307 Instruction::BinaryOps TopLevelOpcode = I.getOpcode(); // op 308 309 // Factorization. 310 if (Op0 && Op1 && Op0->getOpcode() == Op1->getOpcode()) { 311 // The instruction has the form "(A op' B) op (C op' D)". Try to factorize 312 // a common term. 313 Value *A = Op0->getOperand(0), *B = Op0->getOperand(1); 314 Value *C = Op1->getOperand(0), *D = Op1->getOperand(1); 315 Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op' 316 317 // Does "X op' Y" always equal "Y op' X"? 318 bool InnerCommutative = Instruction::isCommutative(InnerOpcode); 319 320 // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"? 321 if (LeftDistributesOverRight(InnerOpcode, TopLevelOpcode)) 322 // Does the instruction have the form "(A op' B) op (A op' D)" or, in the 323 // commutative case, "(A op' B) op (C op' A)"? 324 if (A == C || (InnerCommutative && A == D)) { 325 if (A != C) 326 std::swap(C, D); 327 // Consider forming "A op' (B op D)". 328 // If "B op D" simplifies then it can be formed with no cost. 329 Value *V = SimplifyBinOp(TopLevelOpcode, B, D, TD); 330 // If "B op D" doesn't simplify then only go on if both of the existing 331 // operations "A op' B" and "C op' D" will be zapped as no longer used. 332 if (!V && Op0->hasOneUse() && Op1->hasOneUse()) 333 V = Builder->CreateBinOp(TopLevelOpcode, B, D, Op1->getName()); 334 if (V) { 335 ++NumFactor; 336 V = Builder->CreateBinOp(InnerOpcode, A, V); 337 V->takeName(&I); 338 return V; 339 } 340 } 341 342 // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"? 343 if (RightDistributesOverLeft(TopLevelOpcode, InnerOpcode)) 344 // Does the instruction have the form "(A op' B) op (C op' B)" or, in the 345 // commutative case, "(A op' B) op (B op' D)"? 346 if (B == D || (InnerCommutative && B == C)) { 347 if (B != D) 348 std::swap(C, D); 349 // Consider forming "(A op C) op' B". 350 // If "A op C" simplifies then it can be formed with no cost. 351 Value *V = SimplifyBinOp(TopLevelOpcode, A, C, TD); 352 // If "A op C" doesn't simplify then only go on if both of the existing 353 // operations "A op' B" and "C op' D" will be zapped as no longer used. 354 if (!V && Op0->hasOneUse() && Op1->hasOneUse()) 355 V = Builder->CreateBinOp(TopLevelOpcode, A, C, Op0->getName()); 356 if (V) { 357 ++NumFactor; 358 V = Builder->CreateBinOp(InnerOpcode, V, B); 359 V->takeName(&I); 360 return V; 361 } 362 } 363 } 364 365 // Expansion. 366 if (Op0 && RightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) { 367 // The instruction has the form "(A op' B) op C". See if expanding it out 368 // to "(A op C) op' (B op C)" results in simplifications. 369 Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS; 370 Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op' 371 372 // Do "A op C" and "B op C" both simplify? 373 if (Value *L = SimplifyBinOp(TopLevelOpcode, A, C, TD)) 374 if (Value *R = SimplifyBinOp(TopLevelOpcode, B, C, TD)) { 375 // They do! Return "L op' R". 376 ++NumExpand; 377 // If "L op' R" equals "A op' B" then "L op' R" is just the LHS. 378 if ((L == A && R == B) || 379 (Instruction::isCommutative(InnerOpcode) && L == B && R == A)) 380 return Op0; 381 // Otherwise return "L op' R" if it simplifies. 
382 if (Value *V = SimplifyBinOp(InnerOpcode, L, R, TD)) 383 return V; 384 // Otherwise, create a new instruction. 385 C = Builder->CreateBinOp(InnerOpcode, L, R); 386 C->takeName(&I); 387 return C; 388 } 389 } 390 391 if (Op1 && LeftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) { 392 // The instruction has the form "A op (B op' C)". See if expanding it out 393 // to "(A op B) op' (A op C)" results in simplifications. 394 Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1); 395 Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op' 396 397 // Do "A op B" and "A op C" both simplify? 398 if (Value *L = SimplifyBinOp(TopLevelOpcode, A, B, TD)) 399 if (Value *R = SimplifyBinOp(TopLevelOpcode, A, C, TD)) { 400 // They do! Return "L op' R". 401 ++NumExpand; 402 // If "L op' R" equals "B op' C" then "L op' R" is just the RHS. 403 if ((L == B && R == C) || 404 (Instruction::isCommutative(InnerOpcode) && L == C && R == B)) 405 return Op1; 406 // Otherwise return "L op' R" if it simplifies. 407 if (Value *V = SimplifyBinOp(InnerOpcode, L, R, TD)) 408 return V; 409 // Otherwise, create a new instruction. 410 A = Builder->CreateBinOp(InnerOpcode, L, R); 411 A->takeName(&I); 412 return A; 413 } 414 } 415 416 return 0; 417} 418 419// dyn_castNegVal - Given a 'sub' instruction, return the RHS of the instruction 420// if the LHS is a constant zero (which is the 'negate' form). 421// 422Value *InstCombiner::dyn_castNegVal(Value *V) const { 423 if (BinaryOperator::isNeg(V)) 424 return BinaryOperator::getNegArgument(V); 425 426 // Constants can be considered to be negated values if they can be folded. 427 if (ConstantInt *C = dyn_cast<ConstantInt>(V)) 428 return ConstantExpr::getNeg(C); 429 430 if (ConstantVector *C = dyn_cast<ConstantVector>(V)) 431 if (C->getType()->getElementType()->isIntegerTy()) 432 return ConstantExpr::getNeg(C); 433 434 return 0; 435} 436 437// dyn_castFNegVal - Given a 'fsub' instruction, return the RHS of the 438// instruction if the LHS is a constant negative zero (which is the 'negate' 439// form). 440// 441Value *InstCombiner::dyn_castFNegVal(Value *V) const { 442 if (BinaryOperator::isFNeg(V)) 443 return BinaryOperator::getFNegArgument(V); 444 445 // Constants can be considered to be negated values if they can be folded. 446 if (ConstantFP *C = dyn_cast<ConstantFP>(V)) 447 return ConstantExpr::getFNeg(C); 448 449 if (ConstantVector *C = dyn_cast<ConstantVector>(V)) 450 if (C->getType()->getElementType()->isFloatingPointTy()) 451 return ConstantExpr::getFNeg(C); 452 453 return 0; 454} 455 456static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO, 457 InstCombiner *IC) { 458 if (CastInst *CI = dyn_cast<CastInst>(&I)) 459 return IC->Builder->CreateCast(CI->getOpcode(), SO, I.getType()); 460 461 // Figure out if the constant is the left or the right argument. 
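  // For example, for "%s = select i1 %c, i32 2, i32 %x" used by
  // "%r = add i32 %s, 16", folding the "i32 2" arm yields the constant 18,
  // and folding the "%x" arm yields "%x.op = add i32 %x, 16".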
462 bool ConstIsRHS = isa<Constant>(I.getOperand(1)); 463 Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS)); 464 465 if (Constant *SOC = dyn_cast<Constant>(SO)) { 466 if (ConstIsRHS) 467 return ConstantExpr::get(I.getOpcode(), SOC, ConstOperand); 468 return ConstantExpr::get(I.getOpcode(), ConstOperand, SOC); 469 } 470 471 Value *Op0 = SO, *Op1 = ConstOperand; 472 if (!ConstIsRHS) 473 std::swap(Op0, Op1); 474 475 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I)) 476 return IC->Builder->CreateBinOp(BO->getOpcode(), Op0, Op1, 477 SO->getName()+".op"); 478 if (ICmpInst *CI = dyn_cast<ICmpInst>(&I)) 479 return IC->Builder->CreateICmp(CI->getPredicate(), Op0, Op1, 480 SO->getName()+".cmp"); 481 if (FCmpInst *CI = dyn_cast<FCmpInst>(&I)) 482 return IC->Builder->CreateICmp(CI->getPredicate(), Op0, Op1, 483 SO->getName()+".cmp"); 484 llvm_unreachable("Unknown binary instruction type!"); 485} 486 487// FoldOpIntoSelect - Given an instruction with a select as one operand and a 488// constant as the other operand, try to fold the binary operator into the 489// select arguments. This also works for Cast instructions, which obviously do 490// not have a second operand. 491Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) { 492 // Don't modify shared select instructions 493 if (!SI->hasOneUse()) return 0; 494 Value *TV = SI->getOperand(1); 495 Value *FV = SI->getOperand(2); 496 497 if (isa<Constant>(TV) || isa<Constant>(FV)) { 498 // Bool selects with constant operands can be folded to logical ops. 499 if (SI->getType()->isIntegerTy(1)) return 0; 500 501 Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, this); 502 Value *SelectFalseVal = FoldOperationIntoSelectOperand(Op, FV, this); 503 504 return SelectInst::Create(SI->getCondition(), SelectTrueVal, 505 SelectFalseVal); 506 } 507 return 0; 508} 509 510 511/// FoldOpIntoPhi - Given a binary operator, cast instruction, or select which 512/// has a PHI node as operand #0, see if we can fold the instruction into the 513/// PHI (which is only possible if all operands to the PHI are constants). 514/// 515Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) { 516 PHINode *PN = cast<PHINode>(I.getOperand(0)); 517 unsigned NumPHIValues = PN->getNumIncomingValues(); 518 if (NumPHIValues == 0) 519 return 0; 520 521 // We normally only transform phis with a single use, unless we're trying 522 // hard to make jump threading happen. However, if a PHI has multiple uses 523 // and they are all the same operation, we can fold *all* of the uses into the 524 // PHI. 525 if (!PN->hasOneUse()) { 526 // Walk the use list for the instruction, comparing them to I. 527 for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end(); 528 UI != E; ++UI) 529 if (!I.isIdenticalTo(cast<Instruction>(*UI))) 530 return 0; 531 // Otherwise, we can replace *all* users with the new PHI we form. 532 } 533 534 // Check to see if all of the operands of the PHI are simple constants 535 // (constantint/constantfp/undef). If there is one non-constant value, 536 // remember the BB it is in. If there is more than one or if *it* is a PHI, 537 // bail out. We don't do arbitrary constant expressions here because moving 538 // their computation can be expensive without a cost model. 
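  // For example, with "%p = phi i32 [ 1, %bb1 ], [ 2, %bb2 ]", the operation
  // "%r = add i32 %p, 5" becomes "%r = phi i32 [ 6, %bb1 ], [ 7, %bb2 ]" when
  // every incoming value is constant; a single non-constant incoming value is
  // handled by emitting the add in that predecessor block instead.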
539 BasicBlock *NonConstBB = 0; 540 for (unsigned i = 0; i != NumPHIValues; ++i) { 541 Value *InVal = PN->getIncomingValue(i); 542 if (isa<Constant>(InVal) && !isa<ConstantExpr>(InVal)) 543 continue; 544 545 if (isa<PHINode>(InVal)) return 0; // Itself a phi. 546 if (NonConstBB) return 0; // More than one non-const value. 547 548 NonConstBB = PN->getIncomingBlock(i); 549 550 // If the InVal is an invoke at the end of the pred block, then we can't 551 // insert a computation after it without breaking the edge. 552 if (InvokeInst *II = dyn_cast<InvokeInst>(InVal)) 553 if (II->getParent() == NonConstBB) 554 return 0; 555 } 556 557 // If there is exactly one non-constant value, we can insert a copy of the 558 // operation in that block. However, if this is a critical edge, we would be 559 // inserting the computation one some other paths (e.g. inside a loop). Only 560 // do this if the pred block is unconditionally branching into the phi block. 561 if (NonConstBB != 0) { 562 BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator()); 563 if (!BI || !BI->isUnconditional()) return 0; 564 } 565 566 // Okay, we can do the transformation: create the new PHI node. 567 PHINode *NewPN = PHINode::Create(I.getType(), ""); 568 NewPN->reserveOperandSpace(PN->getNumOperands()/2); 569 InsertNewInstBefore(NewPN, *PN); 570 NewPN->takeName(PN); 571 572 // If we are going to have to insert a new computation, do so right before the 573 // predecessors terminator. 574 if (NonConstBB) 575 Builder->SetInsertPoint(NonConstBB->getTerminator()); 576 577 // Next, add all of the operands to the PHI. 578 if (SelectInst *SI = dyn_cast<SelectInst>(&I)) { 579 // We only currently try to fold the condition of a select when it is a phi, 580 // not the true/false values. 581 Value *TrueV = SI->getTrueValue(); 582 Value *FalseV = SI->getFalseValue(); 583 BasicBlock *PhiTransBB = PN->getParent(); 584 for (unsigned i = 0; i != NumPHIValues; ++i) { 585 BasicBlock *ThisBB = PN->getIncomingBlock(i); 586 Value *TrueVInPred = TrueV->DoPHITranslation(PhiTransBB, ThisBB); 587 Value *FalseVInPred = FalseV->DoPHITranslation(PhiTransBB, ThisBB); 588 Value *InV = 0; 589 if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) 590 InV = InC->isNullValue() ? 
FalseVInPred : TrueVInPred; 591 else 592 InV = Builder->CreateSelect(PN->getIncomingValue(i), 593 TrueVInPred, FalseVInPred, "phitmp"); 594 NewPN->addIncoming(InV, ThisBB); 595 } 596 } else if (CmpInst *CI = dyn_cast<CmpInst>(&I)) { 597 Constant *C = cast<Constant>(I.getOperand(1)); 598 for (unsigned i = 0; i != NumPHIValues; ++i) { 599 Value *InV = 0; 600 if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) 601 InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C); 602 else if (isa<ICmpInst>(CI)) 603 InV = Builder->CreateICmp(CI->getPredicate(), PN->getIncomingValue(i), 604 C, "phitmp"); 605 else 606 InV = Builder->CreateFCmp(CI->getPredicate(), PN->getIncomingValue(i), 607 C, "phitmp"); 608 NewPN->addIncoming(InV, PN->getIncomingBlock(i)); 609 } 610 } else if (I.getNumOperands() == 2) { 611 Constant *C = cast<Constant>(I.getOperand(1)); 612 for (unsigned i = 0; i != NumPHIValues; ++i) { 613 Value *InV = 0; 614 if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) 615 InV = ConstantExpr::get(I.getOpcode(), InC, C); 616 else 617 InV = Builder->CreateBinOp(cast<BinaryOperator>(I).getOpcode(), 618 PN->getIncomingValue(i), C, "phitmp"); 619 NewPN->addIncoming(InV, PN->getIncomingBlock(i)); 620 } 621 } else { 622 CastInst *CI = cast<CastInst>(&I); 623 const Type *RetTy = CI->getType(); 624 for (unsigned i = 0; i != NumPHIValues; ++i) { 625 Value *InV; 626 if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) 627 InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy); 628 else 629 InV = Builder->CreateCast(CI->getOpcode(), 630 PN->getIncomingValue(i), I.getType(), "phitmp"); 631 NewPN->addIncoming(InV, PN->getIncomingBlock(i)); 632 } 633 } 634 635 for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end(); 636 UI != E; ) { 637 Instruction *User = cast<Instruction>(*UI++); 638 if (User == &I) continue; 639 ReplaceInstUsesWith(*User, NewPN); 640 EraseInstFromFunction(*User); 641 } 642 return ReplaceInstUsesWith(I, NewPN); 643} 644 645/// FindElementAtOffset - Given a type and a constant offset, determine whether 646/// or not there is a sequence of GEP indices into the type that will land us at 647/// the specified offset. If so, fill them into NewIndices and return the 648/// resultant element type, otherwise return null. 649const Type *InstCombiner::FindElementAtOffset(const Type *Ty, int64_t Offset, 650 SmallVectorImpl<Value*> &NewIndices) { 651 if (!TD) return 0; 652 if (!Ty->isSized()) return 0; 653 654 // Start with the index over the outer type. Note that the type size 655 // might be zero (even if the offset isn't zero) if the indexed type 656 // is something like [0 x {int, int}] 657 const Type *IntPtrTy = TD->getIntPtrType(Ty->getContext()); 658 int64_t FirstIdx = 0; 659 if (int64_t TySize = TD->getTypeAllocSize(Ty)) { 660 FirstIdx = Offset/TySize; 661 Offset -= FirstIdx*TySize; 662 663 // Handle hosts where % returns negative instead of values [0..TySize). 664 if (Offset < 0) { 665 --FirstIdx; 666 Offset += TySize; 667 assert(Offset >= 0); 668 } 669 assert((uint64_t)Offset < (uint64_t)TySize && "Out of range offset"); 670 } 671 672 NewIndices.push_back(ConstantInt::get(IntPtrTy, FirstIdx)); 673 674 // Index into the types. If we fail, set OrigBase to null. 675 while (Offset) { 676 // Indexing into tail padding between struct/array elements. 
677 if (uint64_t(Offset*8) >= TD->getTypeSizeInBits(Ty)) 678 return 0; 679 680 if (const StructType *STy = dyn_cast<StructType>(Ty)) { 681 const StructLayout *SL = TD->getStructLayout(STy); 682 assert(Offset < (int64_t)SL->getSizeInBytes() && 683 "Offset must stay within the indexed type"); 684 685 unsigned Elt = SL->getElementContainingOffset(Offset); 686 NewIndices.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()), 687 Elt)); 688 689 Offset -= SL->getElementOffset(Elt); 690 Ty = STy->getElementType(Elt); 691 } else if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) { 692 uint64_t EltSize = TD->getTypeAllocSize(AT->getElementType()); 693 assert(EltSize && "Cannot index into a zero-sized array"); 694 NewIndices.push_back(ConstantInt::get(IntPtrTy,Offset/EltSize)); 695 Offset %= EltSize; 696 Ty = AT->getElementType(); 697 } else { 698 // Otherwise, we can't index into the middle of this atomic type, bail. 699 return 0; 700 } 701 } 702 703 return Ty; 704} 705 706 707 708Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { 709 SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end()); 710 711 if (Value *V = SimplifyGEPInst(&Ops[0], Ops.size(), TD)) 712 return ReplaceInstUsesWith(GEP, V); 713 714 Value *PtrOp = GEP.getOperand(0); 715 716 // Eliminate unneeded casts for indices, and replace indices which displace 717 // by multiples of a zero size type with zero. 718 if (TD) { 719 bool MadeChange = false; 720 const Type *IntPtrTy = TD->getIntPtrType(GEP.getContext()); 721 722 gep_type_iterator GTI = gep_type_begin(GEP); 723 for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); 724 I != E; ++I, ++GTI) { 725 // Skip indices into struct types. 726 const SequentialType *SeqTy = dyn_cast<SequentialType>(*GTI); 727 if (!SeqTy) continue; 728 729 // If the element type has zero size then any index over it is equivalent 730 // to an index of zero, so replace it with zero if it is not zero already. 731 if (SeqTy->getElementType()->isSized() && 732 TD->getTypeAllocSize(SeqTy->getElementType()) == 0) 733 if (!isa<Constant>(*I) || !cast<Constant>(*I)->isNullValue()) { 734 *I = Constant::getNullValue(IntPtrTy); 735 MadeChange = true; 736 } 737 738 if ((*I)->getType() != IntPtrTy) { 739 // If we are using a wider index than needed for this platform, shrink 740 // it to what we need. If narrower, sign-extend it to what we need. 741 // This explicit cast can make subsequent optimizations more obvious. 742 *I = Builder->CreateIntCast(*I, IntPtrTy, true); 743 MadeChange = true; 744 } 745 } 746 if (MadeChange) return &GEP; 747 } 748 749 // Combine Indices - If the source pointer to this getelementptr instruction 750 // is a getelementptr instruction, combine the indices of the two 751 // getelementptr instructions into a single instruction. 752 // 753 if (GEPOperator *Src = dyn_cast<GEPOperator>(PtrOp)) { 754 // Note that if our source is a gep chain itself that we wait for that 755 // chain to be resolved before we perform this transformation. This 756 // avoids us creating a TON of code in some cases. 757 // 758 if (GetElementPtrInst *SrcGEP = 759 dyn_cast<GetElementPtrInst>(Src->getOperand(0))) 760 if (SrcGEP->getNumOperands() == 2) 761 return 0; // Wait until our source is folded to completion. 762 763 SmallVector<Value*, 8> Indices; 764 765 // Find out whether the last index in the source GEP is a sequential idx. 
766 bool EndsWithSequential = false; 767 for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src); 768 I != E; ++I) 769 EndsWithSequential = !(*I)->isStructTy(); 770 771 // Can we combine the two pointer arithmetics offsets? 772 if (EndsWithSequential) { 773 // Replace: gep (gep %P, long B), long A, ... 774 // With: T = long A+B; gep %P, T, ... 775 // 776 Value *Sum; 777 Value *SO1 = Src->getOperand(Src->getNumOperands()-1); 778 Value *GO1 = GEP.getOperand(1); 779 if (SO1 == Constant::getNullValue(SO1->getType())) { 780 Sum = GO1; 781 } else if (GO1 == Constant::getNullValue(GO1->getType())) { 782 Sum = SO1; 783 } else { 784 // If they aren't the same type, then the input hasn't been processed 785 // by the loop above yet (which canonicalizes sequential index types to 786 // intptr_t). Just avoid transforming this until the input has been 787 // normalized. 788 if (SO1->getType() != GO1->getType()) 789 return 0; 790 Sum = Builder->CreateAdd(SO1, GO1, PtrOp->getName()+".sum"); 791 } 792 793 // Update the GEP in place if possible. 794 if (Src->getNumOperands() == 2) { 795 GEP.setOperand(0, Src->getOperand(0)); 796 GEP.setOperand(1, Sum); 797 return &GEP; 798 } 799 Indices.append(Src->op_begin()+1, Src->op_end()-1); 800 Indices.push_back(Sum); 801 Indices.append(GEP.op_begin()+2, GEP.op_end()); 802 } else if (isa<Constant>(*GEP.idx_begin()) && 803 cast<Constant>(*GEP.idx_begin())->isNullValue() && 804 Src->getNumOperands() != 1) { 805 // Otherwise we can do the fold if the first index of the GEP is a zero 806 Indices.append(Src->op_begin()+1, Src->op_end()); 807 Indices.append(GEP.idx_begin()+1, GEP.idx_end()); 808 } 809 810 if (!Indices.empty()) 811 return (GEP.isInBounds() && Src->isInBounds()) ? 812 GetElementPtrInst::CreateInBounds(Src->getOperand(0), Indices.begin(), 813 Indices.end(), GEP.getName()) : 814 GetElementPtrInst::Create(Src->getOperand(0), Indices.begin(), 815 Indices.end(), GEP.getName()); 816 } 817 818 // Handle gep(bitcast x) and gep(gep x, 0, 0, 0). 819 Value *StrippedPtr = PtrOp->stripPointerCasts(); 820 if (StrippedPtr != PtrOp) { 821 const PointerType *StrippedPtrTy =cast<PointerType>(StrippedPtr->getType()); 822 823 bool HasZeroPointerIndex = false; 824 if (ConstantInt *C = dyn_cast<ConstantInt>(GEP.getOperand(1))) 825 HasZeroPointerIndex = C->isZero(); 826 827 // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... 828 // into : GEP [10 x i8]* X, i32 0, ... 829 // 830 // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ... 831 // into : GEP i8* X, ... 832 // 833 // This occurs when the program declares an array extern like "int X[];" 834 if (HasZeroPointerIndex) { 835 const PointerType *CPTy = cast<PointerType>(PtrOp->getType()); 836 if (const ArrayType *CATy = 837 dyn_cast<ArrayType>(CPTy->getElementType())) { 838 // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ? 839 if (CATy->getElementType() == StrippedPtrTy->getElementType()) { 840 // -> GEP i8* X, ... 841 SmallVector<Value*, 8> Idx(GEP.idx_begin()+1, GEP.idx_end()); 842 GetElementPtrInst *Res = 843 GetElementPtrInst::Create(StrippedPtr, Idx.begin(), 844 Idx.end(), GEP.getName()); 845 Res->setIsInBounds(GEP.isInBounds()); 846 return Res; 847 } 848 849 if (const ArrayType *XATy = 850 dyn_cast<ArrayType>(StrippedPtrTy->getElementType())){ 851 // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ? 852 if (CATy->getElementType() == XATy->getElementType()) { 853 // -> GEP [10 x i8]* X, i32 0, ... 
854 // At this point, we know that the cast source type is a pointer 855 // to an array of the same type as the destination pointer 856 // array. Because the array type is never stepped over (there 857 // is a leading zero) we can fold the cast into this GEP. 858 GEP.setOperand(0, StrippedPtr); 859 return &GEP; 860 } 861 } 862 } 863 } else if (GEP.getNumOperands() == 2) { 864 // Transform things like: 865 // %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V 866 // into: %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast 867 const Type *SrcElTy = StrippedPtrTy->getElementType(); 868 const Type *ResElTy=cast<PointerType>(PtrOp->getType())->getElementType(); 869 if (TD && SrcElTy->isArrayTy() && 870 TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType()) == 871 TD->getTypeAllocSize(ResElTy)) { 872 Value *Idx[2]; 873 Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP.getContext())); 874 Idx[1] = GEP.getOperand(1); 875 Value *NewGEP = GEP.isInBounds() ? 876 Builder->CreateInBoundsGEP(StrippedPtr, Idx, Idx + 2, GEP.getName()) : 877 Builder->CreateGEP(StrippedPtr, Idx, Idx + 2, GEP.getName()); 878 // V and GEP are both pointer types --> BitCast 879 return new BitCastInst(NewGEP, GEP.getType()); 880 } 881 882 // Transform things like: 883 // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp 884 // (where tmp = 8*tmp2) into: 885 // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast 886 887 if (TD && SrcElTy->isArrayTy() && ResElTy->isIntegerTy(8)) { 888 uint64_t ArrayEltSize = 889 TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType()); 890 891 // Check to see if "tmp" is a scale by a multiple of ArrayEltSize. We 892 // allow either a mul, shift, or constant here. 893 Value *NewIdx = 0; 894 ConstantInt *Scale = 0; 895 if (ArrayEltSize == 1) { 896 NewIdx = GEP.getOperand(1); 897 Scale = ConstantInt::get(cast<IntegerType>(NewIdx->getType()), 1); 898 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP.getOperand(1))) { 899 NewIdx = ConstantInt::get(CI->getType(), 1); 900 Scale = CI; 901 } else if (Instruction *Inst =dyn_cast<Instruction>(GEP.getOperand(1))){ 902 if (Inst->getOpcode() == Instruction::Shl && 903 isa<ConstantInt>(Inst->getOperand(1))) { 904 ConstantInt *ShAmt = cast<ConstantInt>(Inst->getOperand(1)); 905 uint32_t ShAmtVal = ShAmt->getLimitedValue(64); 906 Scale = ConstantInt::get(cast<IntegerType>(Inst->getType()), 907 1ULL << ShAmtVal); 908 NewIdx = Inst->getOperand(0); 909 } else if (Inst->getOpcode() == Instruction::Mul && 910 isa<ConstantInt>(Inst->getOperand(1))) { 911 Scale = cast<ConstantInt>(Inst->getOperand(1)); 912 NewIdx = Inst->getOperand(0); 913 } 914 } 915 916 // If the index will be to exactly the right offset with the scale taken 917 // out, perform the transformation. Note, we don't know whether Scale is 918 // signed or not. We'll use unsigned version of division/modulo 919 // operation after making sure Scale doesn't have the sign bit set. 920 if (ArrayEltSize && Scale && Scale->getSExtValue() >= 0LL && 921 Scale->getZExtValue() % ArrayEltSize == 0) { 922 Scale = ConstantInt::get(Scale->getType(), 923 Scale->getZExtValue() / ArrayEltSize); 924 if (Scale->getZExtValue() != 1) { 925 Constant *C = ConstantExpr::getIntegerCast(Scale, NewIdx->getType(), 926 false /*ZExt*/); 927 NewIdx = Builder->CreateMul(NewIdx, C, "idxscale"); 928 } 929 930 // Insert the new GEP instruction. 
931 Value *Idx[2]; 932 Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP.getContext())); 933 Idx[1] = NewIdx; 934 Value *NewGEP = GEP.isInBounds() ? 935 Builder->CreateInBoundsGEP(StrippedPtr, Idx, Idx + 2,GEP.getName()): 936 Builder->CreateGEP(StrippedPtr, Idx, Idx + 2, GEP.getName()); 937 // The NewGEP must be pointer typed, so must the old one -> BitCast 938 return new BitCastInst(NewGEP, GEP.getType()); 939 } 940 } 941 } 942 } 943 944 /// See if we can simplify: 945 /// X = bitcast A* to B* 946 /// Y = gep X, <...constant indices...> 947 /// into a gep of the original struct. This is important for SROA and alias 948 /// analysis of unions. If "A" is also a bitcast, wait for A/X to be merged. 949 if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) { 950 if (TD && 951 !isa<BitCastInst>(BCI->getOperand(0)) && GEP.hasAllConstantIndices()) { 952 // Determine how much the GEP moves the pointer. We are guaranteed to get 953 // a constant back from EmitGEPOffset. 954 ConstantInt *OffsetV = cast<ConstantInt>(EmitGEPOffset(&GEP)); 955 int64_t Offset = OffsetV->getSExtValue(); 956 957 // If this GEP instruction doesn't move the pointer, just replace the GEP 958 // with a bitcast of the real input to the dest type. 959 if (Offset == 0) { 960 // If the bitcast is of an allocation, and the allocation will be 961 // converted to match the type of the cast, don't touch this. 962 if (isa<AllocaInst>(BCI->getOperand(0)) || 963 isMalloc(BCI->getOperand(0))) { 964 // See if the bitcast simplifies, if so, don't nuke this GEP yet. 965 if (Instruction *I = visitBitCast(*BCI)) { 966 if (I != BCI) { 967 I->takeName(BCI); 968 BCI->getParent()->getInstList().insert(BCI, I); 969 ReplaceInstUsesWith(*BCI, I); 970 } 971 return &GEP; 972 } 973 } 974 return new BitCastInst(BCI->getOperand(0), GEP.getType()); 975 } 976 977 // Otherwise, if the offset is non-zero, we need to find out if there is a 978 // field at Offset in 'A's type. If so, we can pull the cast through the 979 // GEP. 980 SmallVector<Value*, 8> NewIndices; 981 const Type *InTy = 982 cast<PointerType>(BCI->getOperand(0)->getType())->getElementType(); 983 if (FindElementAtOffset(InTy, Offset, NewIndices)) { 984 Value *NGEP = GEP.isInBounds() ? 985 Builder->CreateInBoundsGEP(BCI->getOperand(0), NewIndices.begin(), 986 NewIndices.end()) : 987 Builder->CreateGEP(BCI->getOperand(0), NewIndices.begin(), 988 NewIndices.end()); 989 990 if (NGEP->getType() == GEP.getType()) 991 return ReplaceInstUsesWith(GEP, NGEP); 992 NGEP->takeName(&GEP); 993 return new BitCastInst(NGEP, GEP.getType()); 994 } 995 } 996 } 997 998 return 0; 999} 1000 1001 1002 1003static bool IsOnlyNullComparedAndFreed(const Value &V) { 1004 for (Value::const_use_iterator UI = V.use_begin(), UE = V.use_end(); 1005 UI != UE; ++UI) { 1006 const User *U = *UI; 1007 if (isFreeCall(U)) 1008 continue; 1009 if (const ICmpInst *ICI = dyn_cast<ICmpInst>(U)) 1010 if (ICI->isEquality() && isa<ConstantPointerNull>(ICI->getOperand(1))) 1011 continue; 1012 return false; 1013 } 1014 return true; 1015} 1016 1017Instruction *InstCombiner::visitMalloc(Instruction &MI) { 1018 // If we have a malloc call which is only used in any amount of comparisons 1019 // to null and free calls, delete the calls and replace the comparisons with 1020 // true or false as appropriate. 
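  // For example, if the only uses of "%m = call i8* @malloc(i64 4)" are
  // "%c = icmp eq i8* %m, null" and "call void @free(i8* %m)", then the
  // malloc, the free and the compare are all erased and %c is replaced
  // with "false".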
1021 if (IsOnlyNullComparedAndFreed(MI)) { 1022 for (Value::use_iterator UI = MI.use_begin(), UE = MI.use_end(); 1023 UI != UE;) { 1024 // We can assume that every remaining use is a free call or an icmp eq/ne 1025 // to null, so the cast is safe. 1026 Instruction *I = cast<Instruction>(*UI); 1027 1028 // Early increment here, as we're about to get rid of the user. 1029 ++UI; 1030 1031 if (isFreeCall(I)) { 1032 EraseInstFromFunction(*cast<CallInst>(I)); 1033 continue; 1034 } 1035 // Again, the cast is safe. 1036 ICmpInst *C = cast<ICmpInst>(I); 1037 ReplaceInstUsesWith(*C, ConstantInt::get(Type::getInt1Ty(C->getContext()), 1038 C->isFalseWhenEqual())); 1039 EraseInstFromFunction(*C); 1040 } 1041 return EraseInstFromFunction(MI); 1042 } 1043 return 0; 1044} 1045 1046 1047 1048Instruction *InstCombiner::visitFree(CallInst &FI) { 1049 Value *Op = FI.getArgOperand(0); 1050 1051 // free undef -> unreachable. 1052 if (isa<UndefValue>(Op)) { 1053 // Insert a new store to null because we cannot modify the CFG here. 1054 new StoreInst(ConstantInt::getTrue(FI.getContext()), 1055 UndefValue::get(Type::getInt1PtrTy(FI.getContext())), &FI); 1056 return EraseInstFromFunction(FI); 1057 } 1058 1059 // If we have 'free null' delete the instruction. This can happen in stl code 1060 // when lots of inlining happens. 1061 if (isa<ConstantPointerNull>(Op)) 1062 return EraseInstFromFunction(FI); 1063 1064 return 0; 1065} 1066 1067 1068 1069Instruction *InstCombiner::visitBranchInst(BranchInst &BI) { 1070 // Change br (not X), label True, label False to: br X, label False, True 1071 Value *X = 0; 1072 BasicBlock *TrueDest; 1073 BasicBlock *FalseDest; 1074 if (match(&BI, m_Br(m_Not(m_Value(X)), TrueDest, FalseDest)) && 1075 !isa<Constant>(X)) { 1076 // Swap Destinations and condition... 1077 BI.setCondition(X); 1078 BI.setSuccessor(0, FalseDest); 1079 BI.setSuccessor(1, TrueDest); 1080 return &BI; 1081 } 1082 1083 // Cannonicalize fcmp_one -> fcmp_oeq 1084 FCmpInst::Predicate FPred; Value *Y; 1085 if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)), 1086 TrueDest, FalseDest)) && 1087 BI.getCondition()->hasOneUse()) 1088 if (FPred == FCmpInst::FCMP_ONE || FPred == FCmpInst::FCMP_OLE || 1089 FPred == FCmpInst::FCMP_OGE) { 1090 FCmpInst *Cond = cast<FCmpInst>(BI.getCondition()); 1091 Cond->setPredicate(FCmpInst::getInversePredicate(FPred)); 1092 1093 // Swap Destinations and condition. 1094 BI.setSuccessor(0, FalseDest); 1095 BI.setSuccessor(1, TrueDest); 1096 Worklist.Add(Cond); 1097 return &BI; 1098 } 1099 1100 // Cannonicalize icmp_ne -> icmp_eq 1101 ICmpInst::Predicate IPred; 1102 if (match(&BI, m_Br(m_ICmp(IPred, m_Value(X), m_Value(Y)), 1103 TrueDest, FalseDest)) && 1104 BI.getCondition()->hasOneUse()) 1105 if (IPred == ICmpInst::ICMP_NE || IPred == ICmpInst::ICMP_ULE || 1106 IPred == ICmpInst::ICMP_SLE || IPred == ICmpInst::ICMP_UGE || 1107 IPred == ICmpInst::ICMP_SGE) { 1108 ICmpInst *Cond = cast<ICmpInst>(BI.getCondition()); 1109 Cond->setPredicate(ICmpInst::getInversePredicate(IPred)); 1110 // Swap Destinations and condition. 
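      // For example, "br (icmp sge i32 %a, %b), label %T, label %F" becomes
      // "br (icmp slt i32 %a, %b), label %F, label %T".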
1111 BI.setSuccessor(0, FalseDest); 1112 BI.setSuccessor(1, TrueDest); 1113 Worklist.Add(Cond); 1114 return &BI; 1115 } 1116 1117 return 0; 1118} 1119 1120Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) { 1121 Value *Cond = SI.getCondition(); 1122 if (Instruction *I = dyn_cast<Instruction>(Cond)) { 1123 if (I->getOpcode() == Instruction::Add) 1124 if (ConstantInt *AddRHS = dyn_cast<ConstantInt>(I->getOperand(1))) { 1125 // change 'switch (X+4) case 1:' into 'switch (X) case -3' 1126 for (unsigned i = 2, e = SI.getNumOperands(); i != e; i += 2) 1127 SI.setOperand(i, 1128 ConstantExpr::getSub(cast<Constant>(SI.getOperand(i)), 1129 AddRHS)); 1130 SI.setOperand(0, I->getOperand(0)); 1131 Worklist.Add(I); 1132 return &SI; 1133 } 1134 } 1135 return 0; 1136} 1137 1138Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) { 1139 Value *Agg = EV.getAggregateOperand(); 1140 1141 if (!EV.hasIndices()) 1142 return ReplaceInstUsesWith(EV, Agg); 1143 1144 if (Constant *C = dyn_cast<Constant>(Agg)) { 1145 if (isa<UndefValue>(C)) 1146 return ReplaceInstUsesWith(EV, UndefValue::get(EV.getType())); 1147 1148 if (isa<ConstantAggregateZero>(C)) 1149 return ReplaceInstUsesWith(EV, Constant::getNullValue(EV.getType())); 1150 1151 if (isa<ConstantArray>(C) || isa<ConstantStruct>(C)) { 1152 // Extract the element indexed by the first index out of the constant 1153 Value *V = C->getOperand(*EV.idx_begin()); 1154 if (EV.getNumIndices() > 1) 1155 // Extract the remaining indices out of the constant indexed by the 1156 // first index 1157 return ExtractValueInst::Create(V, EV.idx_begin() + 1, EV.idx_end()); 1158 else 1159 return ReplaceInstUsesWith(EV, V); 1160 } 1161 return 0; // Can't handle other constants 1162 } 1163 if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) { 1164 // We're extracting from an insertvalue instruction, compare the indices 1165 const unsigned *exti, *exte, *insi, *inse; 1166 for (exti = EV.idx_begin(), insi = IV->idx_begin(), 1167 exte = EV.idx_end(), inse = IV->idx_end(); 1168 exti != exte && insi != inse; 1169 ++exti, ++insi) { 1170 if (*insi != *exti) 1171 // The insert and extract both reference distinctly different elements. 1172 // This means the extract is not influenced by the insert, and we can 1173 // replace the aggregate operand of the extract with the aggregate 1174 // operand of the insert. i.e., replace 1175 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1 1176 // %E = extractvalue { i32, { i32 } } %I, 0 1177 // with 1178 // %E = extractvalue { i32, { i32 } } %A, 0 1179 return ExtractValueInst::Create(IV->getAggregateOperand(), 1180 EV.idx_begin(), EV.idx_end()); 1181 } 1182 if (exti == exte && insi == inse) 1183 // Both iterators are at the end: Index lists are identical. Replace 1184 // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0 1185 // %C = extractvalue { i32, { i32 } } %B, 1, 0 1186 // with "i32 42" 1187 return ReplaceInstUsesWith(EV, IV->getInsertedValueOperand()); 1188 if (exti == exte) { 1189 // The extract list is a prefix of the insert list. i.e. replace 1190 // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0 1191 // %E = extractvalue { i32, { i32 } } %I, 1 1192 // with 1193 // %X = extractvalue { i32, { i32 } } %A, 1 1194 // %E = insertvalue { i32 } %X, i32 42, 0 1195 // by switching the order of the insert and extract (though the 1196 // insertvalue should be left in, since it may have other uses). 
1197 Value *NewEV = Builder->CreateExtractValue(IV->getAggregateOperand(), 1198 EV.idx_begin(), EV.idx_end()); 1199 return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(), 1200 insi, inse); 1201 } 1202 if (insi == inse) 1203 // The insert list is a prefix of the extract list 1204 // We can simply remove the common indices from the extract and make it 1205 // operate on the inserted value instead of the insertvalue result. 1206 // i.e., replace 1207 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1 1208 // %E = extractvalue { i32, { i32 } } %I, 1, 0 1209 // with 1210 // %E extractvalue { i32 } { i32 42 }, 0 1211 return ExtractValueInst::Create(IV->getInsertedValueOperand(), 1212 exti, exte); 1213 } 1214 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Agg)) { 1215 // We're extracting from an intrinsic, see if we're the only user, which 1216 // allows us to simplify multiple result intrinsics to simpler things that 1217 // just get one value. 1218 if (II->hasOneUse()) { 1219 // Check if we're grabbing the overflow bit or the result of a 'with 1220 // overflow' intrinsic. If it's the latter we can remove the intrinsic 1221 // and replace it with a traditional binary instruction. 1222 switch (II->getIntrinsicID()) { 1223 case Intrinsic::uadd_with_overflow: 1224 case Intrinsic::sadd_with_overflow: 1225 if (*EV.idx_begin() == 0) { // Normal result. 1226 Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1); 1227 II->replaceAllUsesWith(UndefValue::get(II->getType())); 1228 EraseInstFromFunction(*II); 1229 return BinaryOperator::CreateAdd(LHS, RHS); 1230 } 1231 1232 // If the normal result of the add is dead, and the RHS is a constant, 1233 // we can transform this into a range comparison. 1234 // overflow = uadd a, -4 --> overflow = icmp ugt a, 3 1235 if (II->getIntrinsicID() == Intrinsic::uadd_with_overflow) 1236 if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getArgOperand(1))) 1237 return new ICmpInst(ICmpInst::ICMP_UGT, II->getArgOperand(0), 1238 ConstantExpr::getNot(CI)); 1239 break; 1240 case Intrinsic::usub_with_overflow: 1241 case Intrinsic::ssub_with_overflow: 1242 if (*EV.idx_begin() == 0) { // Normal result. 1243 Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1); 1244 II->replaceAllUsesWith(UndefValue::get(II->getType())); 1245 EraseInstFromFunction(*II); 1246 return BinaryOperator::CreateSub(LHS, RHS); 1247 } 1248 break; 1249 case Intrinsic::umul_with_overflow: 1250 case Intrinsic::smul_with_overflow: 1251 if (*EV.idx_begin() == 0) { // Normal result. 1252 Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1); 1253 II->replaceAllUsesWith(UndefValue::get(II->getType())); 1254 EraseInstFromFunction(*II); 1255 return BinaryOperator::CreateMul(LHS, RHS); 1256 } 1257 break; 1258 default: 1259 break; 1260 } 1261 } 1262 } 1263 if (LoadInst *L = dyn_cast<LoadInst>(Agg)) 1264 // If the (non-volatile) load only has one use, we can rewrite this to a 1265 // load from a GEP. This reduces the size of the load. 1266 // FIXME: If a load is used only by extractvalue instructions then this 1267 // could be done regardless of having multiple uses. 1268 if (!L->isVolatile() && L->hasOneUse()) { 1269 // extractvalue has integer indices, getelementptr has Value*s. Convert. 1270 SmallVector<Value*, 4> Indices; 1271 // Prefix an i32 0 since we need the first element. 
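    // For example, "%a = load {i32, i64}* %p" followed by
    // "%v = extractvalue {i32, i64} %a, 1" becomes
    // "%q = getelementptr inbounds {i32, i64}* %p, i32 0, i32 1" followed by
    // "%v = load i64* %q".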
1272 Indices.push_back(Builder->getInt32(0)); 1273 for (ExtractValueInst::idx_iterator I = EV.idx_begin(), E = EV.idx_end(); 1274 I != E; ++I) 1275 Indices.push_back(Builder->getInt32(*I)); 1276 1277 // We need to insert these at the location of the old load, not at that of 1278 // the extractvalue. 1279 Builder->SetInsertPoint(L->getParent(), L); 1280 Value *GEP = Builder->CreateInBoundsGEP(L->getPointerOperand(), 1281 Indices.begin(), Indices.end()); 1282 // Returning the load directly will cause the main loop to insert it in 1283 // the wrong spot, so use ReplaceInstUsesWith(). 1284 return ReplaceInstUsesWith(EV, Builder->CreateLoad(GEP)); 1285 } 1286 // We could simplify extracts from other values. Note that nested extracts may 1287 // already be simplified implicitly by the above: extract (extract (insert) ) 1288 // will be translated into extract ( insert ( extract ) ) first and then just 1289 // the value inserted, if appropriate. Similarly for extracts from single-use 1290 // loads: extract (extract (load)) will be translated to extract (load (gep)) 1291 // and if again single-use then via load (gep (gep)) to load (gep). 1292 // However, double extracts from e.g. function arguments or return values 1293 // aren't handled yet. 1294 return 0; 1295} 1296 1297 1298 1299 1300/// TryToSinkInstruction - Try to move the specified instruction from its 1301/// current block into the beginning of DestBlock, which can only happen if it's 1302/// safe to move the instruction past all of the instructions between it and the 1303/// end of its block. 1304static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) { 1305 assert(I->hasOneUse() && "Invariants didn't hold!"); 1306 1307 // Cannot move control-flow-involving, volatile loads, vaarg, etc. 1308 if (isa<PHINode>(I) || I->mayHaveSideEffects() || isa<TerminatorInst>(I)) 1309 return false; 1310 1311 // Do not sink alloca instructions out of the entry block. 1312 if (isa<AllocaInst>(I) && I->getParent() == 1313 &DestBlock->getParent()->getEntryBlock()) 1314 return false; 1315 1316 // We can only sink load instructions if there is nothing between the load and 1317 // the end of block that could change the value. 1318 if (I->mayReadFromMemory()) { 1319 for (BasicBlock::iterator Scan = I, E = I->getParent()->end(); 1320 Scan != E; ++Scan) 1321 if (Scan->mayWriteToMemory()) 1322 return false; 1323 } 1324 1325 BasicBlock::iterator InsertPos = DestBlock->getFirstNonPHI(); 1326 1327 I->moveBefore(InsertPos); 1328 ++NumSunkInst; 1329 return true; 1330} 1331 1332 1333/// AddReachableCodeToWorklist - Walk the function in depth-first order, adding 1334/// all reachable code to the worklist. 1335/// 1336/// This has a couple of tricks to make the code faster and more powerful. In 1337/// particular, we constant fold and DCE instructions as we go, to avoid adding 1338/// them to the worklist (this significantly speeds up instcombine on code where 1339/// many instructions are dead or constant). Additionally, if we find a branch 1340/// whose condition is a known constant, we only visit the reachable successors. 
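/// For example, given "br i1 true, label %live, label %dead", only %live is
/// pushed onto the block worklist, so the instructions in %dead are never
/// visited at all.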
1341/// 1342static bool AddReachableCodeToWorklist(BasicBlock *BB, 1343 SmallPtrSet<BasicBlock*, 64> &Visited, 1344 InstCombiner &IC, 1345 const TargetData *TD) { 1346 bool MadeIRChange = false; 1347 SmallVector<BasicBlock*, 256> Worklist; 1348 Worklist.push_back(BB); 1349 1350 SmallVector<Instruction*, 128> InstrsForInstCombineWorklist; 1351 SmallPtrSet<ConstantExpr*, 64> FoldedConstants; 1352 1353 do { 1354 BB = Worklist.pop_back_val(); 1355 1356 // We have now visited this block! If we've already been here, ignore it. 1357 if (!Visited.insert(BB)) continue; 1358 1359 for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) { 1360 Instruction *Inst = BBI++; 1361 1362 // DCE instruction if trivially dead. 1363 if (isInstructionTriviallyDead(Inst)) { 1364 ++NumDeadInst; 1365 DEBUG(errs() << "IC: DCE: " << *Inst << '\n'); 1366 Inst->eraseFromParent(); 1367 continue; 1368 } 1369 1370 // ConstantProp instruction if trivially constant. 1371 if (!Inst->use_empty() && isa<Constant>(Inst->getOperand(0))) 1372 if (Constant *C = ConstantFoldInstruction(Inst, TD)) { 1373 DEBUG(errs() << "IC: ConstFold to: " << *C << " from: " 1374 << *Inst << '\n'); 1375 Inst->replaceAllUsesWith(C); 1376 ++NumConstProp; 1377 Inst->eraseFromParent(); 1378 continue; 1379 } 1380 1381 if (TD) { 1382 // See if we can constant fold its operands. 1383 for (User::op_iterator i = Inst->op_begin(), e = Inst->op_end(); 1384 i != e; ++i) { 1385 ConstantExpr *CE = dyn_cast<ConstantExpr>(i); 1386 if (CE == 0) continue; 1387 1388 // If we already folded this constant, don't try again. 1389 if (!FoldedConstants.insert(CE)) 1390 continue; 1391 1392 Constant *NewC = ConstantFoldConstantExpression(CE, TD); 1393 if (NewC && NewC != CE) { 1394 *i = NewC; 1395 MadeIRChange = true; 1396 } 1397 } 1398 } 1399 1400 InstrsForInstCombineWorklist.push_back(Inst); 1401 } 1402 1403 // Recursively visit successors. If this is a branch or switch on a 1404 // constant, only visit the reachable successor. 1405 TerminatorInst *TI = BB->getTerminator(); 1406 if (BranchInst *BI = dyn_cast<BranchInst>(TI)) { 1407 if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) { 1408 bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue(); 1409 BasicBlock *ReachableBB = BI->getSuccessor(!CondVal); 1410 Worklist.push_back(ReachableBB); 1411 continue; 1412 } 1413 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) { 1414 if (ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition())) { 1415 // See if this is an explicit destination. 1416 for (unsigned i = 1, e = SI->getNumSuccessors(); i != e; ++i) 1417 if (SI->getCaseValue(i) == Cond) { 1418 BasicBlock *ReachableBB = SI->getSuccessor(i); 1419 Worklist.push_back(ReachableBB); 1420 continue; 1421 } 1422 1423 // Otherwise it is the default destination. 1424 Worklist.push_back(SI->getSuccessor(0)); 1425 continue; 1426 } 1427 } 1428 1429 for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) 1430 Worklist.push_back(TI->getSuccessor(i)); 1431 } while (!Worklist.empty()); 1432 1433 // Once we've found all of the instructions to add to instcombine's worklist, 1434 // add them in reverse order. This way instcombine will visit from the top 1435 // of the function down. This jives well with the way that it adds all uses 1436 // of instructions to the worklist after doing a transformation, thus avoiding 1437 // some N^2 behavior in pathological cases. 
1438 IC.Worklist.AddInitialGroup(&InstrsForInstCombineWorklist[0], 1439 InstrsForInstCombineWorklist.size()); 1440 1441 return MadeIRChange; 1442} 1443 1444bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) { 1445 MadeIRChange = false; 1446 1447 DEBUG(errs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on " 1448 << F.getNameStr() << "\n"); 1449 1450 { 1451 // Do a depth-first traversal of the function, populate the worklist with 1452 // the reachable instructions. Ignore blocks that are not reachable. Keep 1453 // track of which blocks we visit. 1454 SmallPtrSet<BasicBlock*, 64> Visited; 1455 MadeIRChange |= AddReachableCodeToWorklist(F.begin(), Visited, *this, TD); 1456 1457 // Do a quick scan over the function. If we find any blocks that are 1458 // unreachable, remove any instructions inside of them. This prevents 1459 // the instcombine code from having to deal with some bad special cases. 1460 for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) 1461 if (!Visited.count(BB)) { 1462 Instruction *Term = BB->getTerminator(); 1463 while (Term != BB->begin()) { // Remove instrs bottom-up 1464 BasicBlock::iterator I = Term; --I; 1465 1466 DEBUG(errs() << "IC: DCE: " << *I << '\n'); 1467 // A debug intrinsic shouldn't force another iteration if we weren't 1468 // going to do one without it. 1469 if (!isa<DbgInfoIntrinsic>(I)) { 1470 ++NumDeadInst; 1471 MadeIRChange = true; 1472 } 1473 1474 // If I is not void type then replaceAllUsesWith undef. 1475 // This allows ValueHandlers and custom metadata to adjust itself. 1476 if (!I->getType()->isVoidTy()) 1477 I->replaceAllUsesWith(UndefValue::get(I->getType())); 1478 I->eraseFromParent(); 1479 } 1480 } 1481 } 1482 1483 while (!Worklist.isEmpty()) { 1484 Instruction *I = Worklist.RemoveOne(); 1485 if (I == 0) continue; // skip null values. 1486 1487 // Check to see if we can DCE the instruction. 1488 if (isInstructionTriviallyDead(I)) { 1489 DEBUG(errs() << "IC: DCE: " << *I << '\n'); 1490 EraseInstFromFunction(*I); 1491 ++NumDeadInst; 1492 MadeIRChange = true; 1493 continue; 1494 } 1495 1496 // Instruction isn't dead, see if we can constant propagate it. 1497 if (!I->use_empty() && isa<Constant>(I->getOperand(0))) 1498 if (Constant *C = ConstantFoldInstruction(I, TD)) { 1499 DEBUG(errs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n'); 1500 1501 // Add operands to the worklist. 1502 ReplaceInstUsesWith(*I, C); 1503 ++NumConstProp; 1504 EraseInstFromFunction(*I); 1505 MadeIRChange = true; 1506 continue; 1507 } 1508 1509 // See if we can trivially sink this instruction to a successor basic block. 1510 if (I->hasOneUse()) { 1511 BasicBlock *BB = I->getParent(); 1512 Instruction *UserInst = cast<Instruction>(I->use_back()); 1513 BasicBlock *UserParent; 1514 1515 // Get the block the use occurs in. 1516 if (PHINode *PN = dyn_cast<PHINode>(UserInst)) 1517 UserParent = PN->getIncomingBlock(I->use_begin().getUse()); 1518 else 1519 UserParent = UserInst->getParent(); 1520 1521 if (UserParent != BB) { 1522 bool UserIsSuccessor = false; 1523 // See if the user is one of our successors. 1524 for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI) 1525 if (*SI == UserParent) { 1526 UserIsSuccessor = true; 1527 break; 1528 } 1529 1530 // If the user is one of our immediate successors, and if that successor 1531 // only has us as a predecessors (we'd have to split the critical edge 1532 // otherwise), we can keep going. 
1533 if (UserIsSuccessor && UserParent->getSinglePredecessor()) 1534 // Okay, the CFG is simple enough, try to sink this instruction. 1535 MadeIRChange |= TryToSinkInstruction(I, UserParent); 1536 } 1537 } 1538 1539 // Now that we have an instruction, try combining it to simplify it. 1540 Builder->SetInsertPoint(I->getParent(), I); 1541 1542#ifndef NDEBUG 1543 std::string OrigI; 1544#endif 1545 DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str();); 1546 DEBUG(errs() << "IC: Visiting: " << OrigI << '\n'); 1547 1548 if (Instruction *Result = visit(*I)) { 1549 ++NumCombined; 1550 // Should we replace the old instruction with a new one? 1551 if (Result != I) { 1552 DEBUG(errs() << "IC: Old = " << *I << '\n' 1553 << " New = " << *Result << '\n'); 1554 1555 // Everything uses the new instruction now. 1556 I->replaceAllUsesWith(Result); 1557 1558 // Push the new instruction and any users onto the worklist. 1559 Worklist.Add(Result); 1560 Worklist.AddUsersToWorkList(*Result); 1561 1562 // Move the name to the new instruction first. 1563 Result->takeName(I); 1564 1565 // Insert the new instruction into the basic block... 1566 BasicBlock *InstParent = I->getParent(); 1567 BasicBlock::iterator InsertPos = I; 1568 1569 if (!isa<PHINode>(Result)) // If combining a PHI, don't insert 1570 while (isa<PHINode>(InsertPos)) // middle of a block of PHIs. 1571 ++InsertPos; 1572 1573 InstParent->getInstList().insert(InsertPos, Result); 1574 1575 EraseInstFromFunction(*I); 1576 } else { 1577#ifndef NDEBUG 1578 DEBUG(errs() << "IC: Mod = " << OrigI << '\n' 1579 << " New = " << *I << '\n'); 1580#endif 1581 1582 // If the instruction was modified, it's possible that it is now dead. 1583 // if so, remove it. 1584 if (isInstructionTriviallyDead(I)) { 1585 EraseInstFromFunction(*I); 1586 } else { 1587 Worklist.Add(I); 1588 Worklist.AddUsersToWorkList(*I); 1589 } 1590 } 1591 MadeIRChange = true; 1592 } 1593 } 1594 1595 Worklist.Zap(); 1596 return MadeIRChange; 1597} 1598 1599 1600bool InstCombiner::runOnFunction(Function &F) { 1601 MustPreserveLCSSA = mustPreserveAnalysisID(LCSSAID); 1602 TD = getAnalysisIfAvailable<TargetData>(); 1603 1604 1605 /// Builder - This is an IRBuilder that automatically inserts new 1606 /// instructions into the worklist when they are created. 1607 IRBuilder<true, TargetFolder, InstCombineIRInserter> 1608 TheBuilder(F.getContext(), TargetFolder(TD), 1609 InstCombineIRInserter(Worklist)); 1610 Builder = &TheBuilder; 1611 1612 bool EverMadeChange = false; 1613 1614 // Iterate while there is work to do. 1615 unsigned Iteration = 0; 1616 while (DoOneIteration(F, Iteration++)) 1617 EverMadeChange = true; 1618 1619 Builder = 0; 1620 return EverMadeChange; 1621} 1622 1623FunctionPass *llvm::createInstructionCombiningPass() { 1624 return new InstCombiner(); 1625} 1626
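// A minimal usage sketch (assuming the legacy PassManager interfaces of this
// era; "M" here is a hypothetical Module). TargetData is optional, but without
// it the TD-dependent folds above are skipped:
//
//   PassManager PM;
//   PM.add(new TargetData(&M));               // optional target layout info
//   PM.add(createInstructionCombiningPass());
//   PM.run(M);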