InstructionCombining.cpp revision 7dfe8fd96ce3aaa027afd27b954a3b01306eafbf
1//===- InstructionCombining.cpp - Combine multiple instructions -----------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// InstructionCombining - Combine instructions to form fewer, simple 11// instructions. This pass does not modify the CFG. This pass is where 12// algebraic simplification happens. 13// 14// This pass combines things like: 15// %Y = add i32 %X, 1 16// %Z = add i32 %Y, 1 17// into: 18// %Z = add i32 %X, 2 19// 20// This is a simple worklist driven algorithm. 21// 22// This pass guarantees that the following canonicalizations are performed on 23// the program: 24// 1. If a binary operator has a constant operand, it is moved to the RHS 25// 2. Bitwise operators with constant operands are always grouped so that 26// shifts are performed first, then or's, then and's, then xor's. 27// 3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible 28// 4. All cmp instructions on boolean values are replaced with logical ops 29// 5. add X, X is represented as (X*2) => (X << 1) 30// 6. Multiplies with a power-of-two constant argument are transformed into 31// shifts. 32// ... etc. 33// 34//===----------------------------------------------------------------------===// 35 36#define DEBUG_TYPE "instcombine" 37#include "llvm/Transforms/Scalar.h" 38#include "InstCombine.h" 39#include "llvm/IntrinsicInst.h" 40#include "llvm/Analysis/ConstantFolding.h" 41#include "llvm/Analysis/InstructionSimplify.h" 42#include "llvm/Analysis/MemoryBuiltins.h" 43#include "llvm/Target/TargetData.h" 44#include "llvm/Transforms/Utils/Local.h" 45#include "llvm/Support/CFG.h" 46#include "llvm/Support/Debug.h" 47#include "llvm/Support/GetElementPtrTypeIterator.h" 48#include "llvm/Support/PatternMatch.h" 49#include "llvm/ADT/SmallPtrSet.h" 50#include "llvm/ADT/Statistic.h" 51#include "llvm-c/Initialization.h" 52#include <algorithm> 53#include <climits> 54using namespace llvm; 55using namespace llvm::PatternMatch; 56 57STATISTIC(NumCombined , "Number of insts combined"); 58STATISTIC(NumConstProp, "Number of constant folds"); 59STATISTIC(NumDeadInst , "Number of dead inst eliminated"); 60STATISTIC(NumSunkInst , "Number of instructions sunk"); 61STATISTIC(NumExpand, "Number of expansions"); 62STATISTIC(NumFactor , "Number of factorizations"); 63STATISTIC(NumReassoc , "Number of reassociations"); 64 65// Initialization Routines 66void llvm::initializeInstCombine(PassRegistry &Registry) { 67 initializeInstCombinerPass(Registry); 68} 69 70void LLVMInitializeInstCombine(LLVMPassRegistryRef R) { 71 initializeInstCombine(*unwrap(R)); 72} 73 74char InstCombiner::ID = 0; 75INITIALIZE_PASS(InstCombiner, "instcombine", 76 "Combine redundant instructions", false, false) 77 78void InstCombiner::getAnalysisUsage(AnalysisUsage &AU) const { 79 AU.addPreservedID(LCSSAID); 80 AU.setPreservesCFG(); 81} 82 83 84/// ShouldChangeType - Return true if it is desirable to convert a computation 85/// from 'From' to 'To'. We don't want to convert from a legal to an illegal 86/// type for example, or from a smaller to a larger illegal type. 87bool InstCombiner::ShouldChangeType(const Type *From, const Type *To) const { 88 assert(From->isIntegerTy() && To->isIntegerTy()); 89 90 // If we don't have TD, we don't know if the source/dest are legal. 91 if (!TD) return false; 92 93 unsigned FromWidth = From->getPrimitiveSizeInBits(); 94 unsigned ToWidth = To->getPrimitiveSizeInBits(); 95 bool FromLegal = TD->isLegalInteger(FromWidth); 96 bool ToLegal = TD->isLegalInteger(ToWidth); 97 98 // If this is a legal integer from type, and the result would be an illegal 99 // type, don't do the transformation. 100 if (FromLegal && !ToLegal) 101 return false; 102 103 // Otherwise, if both are illegal, do not increase the size of the result. We 104 // do allow things like i160 -> i64, but not i64 -> i160. 105 if (!FromLegal && !ToLegal && ToWidth > FromWidth) 106 return false; 107 108 return true; 109} 110 111 112/// SimplifyAssociativeOrCommutative - This performs a few simplifications for 113/// operators which are associative or commutative: 114// 115// Commutative operators: 116// 117// 1. Order operands such that they are listed from right (least complex) to 118// left (most complex). This puts constants before unary operators before 119// binary operators. 120// 121// Associative operators: 122// 123// 2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies. 124// 3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies. 125// 126// Associative and commutative operators: 127// 128// 4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies. 129// 5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies. 130// 6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)" 131// if C1 and C2 are constants. 132// 133bool InstCombiner::SimplifyAssociativeOrCommutative(BinaryOperator &I) { 134 Instruction::BinaryOps Opcode = I.getOpcode(); 135 bool Changed = false; 136 137 do { 138 // Order operands such that they are listed from right (least complex) to 139 // left (most complex). This puts constants before unary operators before 140 // binary operators. 141 if (I.isCommutative() && getComplexity(I.getOperand(0)) < 142 getComplexity(I.getOperand(1))) 143 Changed = !I.swapOperands(); 144 145 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0)); 146 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1)); 147 148 if (I.isAssociative()) { 149 // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies. 150 if (Op0 && Op0->getOpcode() == Opcode) { 151 Value *A = Op0->getOperand(0); 152 Value *B = Op0->getOperand(1); 153 Value *C = I.getOperand(1); 154 155 // Does "B op C" simplify? 156 if (Value *V = SimplifyBinOp(Opcode, B, C, TD)) { 157 // It simplifies to V. Form "A op V". 158 I.setOperand(0, A); 159 I.setOperand(1, V); 160 Changed = true; 161 ++NumReassoc; 162 continue; 163 } 164 } 165 166 // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies. 167 if (Op1 && Op1->getOpcode() == Opcode) { 168 Value *A = I.getOperand(0); 169 Value *B = Op1->getOperand(0); 170 Value *C = Op1->getOperand(1); 171 172 // Does "A op B" simplify? 173 if (Value *V = SimplifyBinOp(Opcode, A, B, TD)) { 174 // It simplifies to V. Form "V op C". 175 I.setOperand(0, V); 176 I.setOperand(1, C); 177 Changed = true; 178 ++NumReassoc; 179 continue; 180 } 181 } 182 } 183 184 if (I.isAssociative() && I.isCommutative()) { 185 // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies. 186 if (Op0 && Op0->getOpcode() == Opcode) { 187 Value *A = Op0->getOperand(0); 188 Value *B = Op0->getOperand(1); 189 Value *C = I.getOperand(1); 190 191 // Does "C op A" simplify? 192 if (Value *V = SimplifyBinOp(Opcode, C, A, TD)) { 193 // It simplifies to V. Form "V op B". 194 I.setOperand(0, V); 195 I.setOperand(1, B); 196 Changed = true; 197 ++NumReassoc; 198 continue; 199 } 200 } 201 202 // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies. 203 if (Op1 && Op1->getOpcode() == Opcode) { 204 Value *A = I.getOperand(0); 205 Value *B = Op1->getOperand(0); 206 Value *C = Op1->getOperand(1); 207 208 // Does "C op A" simplify? 209 if (Value *V = SimplifyBinOp(Opcode, C, A, TD)) { 210 // It simplifies to V. Form "B op V". 211 I.setOperand(0, B); 212 I.setOperand(1, V); 213 Changed = true; 214 ++NumReassoc; 215 continue; 216 } 217 } 218 219 // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)" 220 // if C1 and C2 are constants. 221 if (Op0 && Op1 && 222 Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode && 223 isa<Constant>(Op0->getOperand(1)) && 224 isa<Constant>(Op1->getOperand(1)) && 225 Op0->hasOneUse() && Op1->hasOneUse()) { 226 Value *A = Op0->getOperand(0); 227 Constant *C1 = cast<Constant>(Op0->getOperand(1)); 228 Value *B = Op1->getOperand(0); 229 Constant *C2 = cast<Constant>(Op1->getOperand(1)); 230 231 Constant *Folded = ConstantExpr::get(Opcode, C1, C2); 232 Instruction *New = BinaryOperator::Create(Opcode, A, B, Op1->getName(), 233 &I); 234 Worklist.Add(New); 235 I.setOperand(0, New); 236 I.setOperand(1, Folded); 237 Changed = true; 238 continue; 239 } 240 } 241 242 // No further simplifications. 243 return Changed; 244 } while (1); 245} 246 247/// LeftDistributesOverRight - Whether "X LOp (Y ROp Z)" is always equal to 248/// "(X LOp Y) ROp (X LOp Z)". 249static bool LeftDistributesOverRight(Instruction::BinaryOps LOp, 250 Instruction::BinaryOps ROp) { 251 switch (LOp) { 252 default: 253 return false; 254 255 case Instruction::And: 256 // And distributes over Or and Xor. 257 switch (ROp) { 258 default: 259 return false; 260 case Instruction::Or: 261 case Instruction::Xor: 262 return true; 263 } 264 265 case Instruction::Mul: 266 // Multiplication distributes over addition and subtraction. 267 switch (ROp) { 268 default: 269 return false; 270 case Instruction::Add: 271 case Instruction::Sub: 272 return true; 273 } 274 275 case Instruction::Or: 276 // Or distributes over And. 277 switch (ROp) { 278 default: 279 return false; 280 case Instruction::And: 281 return true; 282 } 283 } 284} 285 286/// RightDistributesOverLeft - Whether "(X LOp Y) ROp Z" is always equal to 287/// "(X ROp Z) LOp (Y ROp Z)". 288static bool RightDistributesOverLeft(Instruction::BinaryOps LOp, 289 Instruction::BinaryOps ROp) { 290 if (Instruction::isCommutative(ROp)) 291 return LeftDistributesOverRight(ROp, LOp); 292 // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z", 293 // but this requires knowing that the addition does not overflow and other 294 // such subtleties. 295 return false; 296} 297 298/// SimplifyUsingDistributiveLaws - This tries to simplify binary operations 299/// which some other binary operation distributes over either by factorizing 300/// out common terms (eg "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this 301/// results in simplifications (eg: "A & (B | C) -> (A&B) | (A&C)" if this is 302/// a win). Returns the simplified value, or null if it didn't simplify. 303Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) { 304 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1); 305 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS); 306 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS); 307 Instruction::BinaryOps TopLevelOpcode = I.getOpcode(); // op 308 309 // Factorization. 310 if (Op0 && Op1 && Op0->getOpcode() == Op1->getOpcode()) { 311 // The instruction has the form "(A op' B) op (C op' D)". Try to factorize 312 // a common term. 313 Value *A = Op0->getOperand(0), *B = Op0->getOperand(1); 314 Value *C = Op1->getOperand(0), *D = Op1->getOperand(1); 315 Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op' 316 317 // Does "X op' Y" always equal "Y op' X"? 318 bool InnerCommutative = Instruction::isCommutative(InnerOpcode); 319 320 // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"? 321 if (LeftDistributesOverRight(InnerOpcode, TopLevelOpcode)) 322 // Does the instruction have the form "(A op' B) op (A op' D)" or, in the 323 // commutative case, "(A op' B) op (C op' A)"? 324 if (A == C || (InnerCommutative && A == D)) { 325 if (A != C) 326 std::swap(C, D); 327 // Consider forming "A op' (B op D)". 328 // If "B op D" simplifies then it can be formed with no cost. 329 Value *V = SimplifyBinOp(TopLevelOpcode, B, D, TD); 330 // If "B op D" doesn't simplify then only go on if both of the existing 331 // operations "A op' B" and "C op' D" will be zapped as no longer used. 332 if (!V && Op0->hasOneUse() && Op1->hasOneUse()) 333 V = Builder->CreateBinOp(TopLevelOpcode, B, D, Op1->getName()); 334 if (V) { 335 ++NumFactor; 336 V = Builder->CreateBinOp(InnerOpcode, A, V); 337 V->takeName(&I); 338 return V; 339 } 340 } 341 342 // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"? 343 if (RightDistributesOverLeft(TopLevelOpcode, InnerOpcode)) 344 // Does the instruction have the form "(A op' B) op (C op' B)" or, in the 345 // commutative case, "(A op' B) op (B op' D)"? 346 if (B == D || (InnerCommutative && B == C)) { 347 if (B != D) 348 std::swap(C, D); 349 // Consider forming "(A op C) op' B". 350 // If "A op C" simplifies then it can be formed with no cost. 351 Value *V = SimplifyBinOp(TopLevelOpcode, A, C, TD); 352 // If "A op C" doesn't simplify then only go on if both of the existing 353 // operations "A op' B" and "C op' D" will be zapped as no longer used. 354 if (!V && Op0->hasOneUse() && Op1->hasOneUse()) 355 V = Builder->CreateBinOp(TopLevelOpcode, A, C, Op0->getName()); 356 if (V) { 357 ++NumFactor; 358 V = Builder->CreateBinOp(InnerOpcode, V, B); 359 V->takeName(&I); 360 return V; 361 } 362 } 363 } 364 365 // Expansion. 366 if (Op0 && RightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) { 367 // The instruction has the form "(A op' B) op C". See if expanding it out 368 // to "(A op C) op' (B op C)" results in simplifications. 369 Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS; 370 Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op' 371 372 // Do "A op C" and "B op C" both simplify? 373 if (Value *L = SimplifyBinOp(TopLevelOpcode, A, C, TD)) 374 if (Value *R = SimplifyBinOp(TopLevelOpcode, B, C, TD)) { 375 // They do! Return "L op' R". 376 ++NumExpand; 377 // If "L op' R" equals "A op' B" then "L op' R" is just the LHS. 378 if ((L == A && R == B) || 379 (Instruction::isCommutative(InnerOpcode) && L == B && R == A)) 380 return Op0; 381 // Otherwise return "L op' R" if it simplifies. 382 if (Value *V = SimplifyBinOp(InnerOpcode, L, R, TD)) 383 return V; 384 // Otherwise, create a new instruction. 385 C = Builder->CreateBinOp(InnerOpcode, L, R); 386 C->takeName(&I); 387 return C; 388 } 389 } 390 391 if (Op1 && LeftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) { 392 // The instruction has the form "A op (B op' C)". See if expanding it out 393 // to "(A op B) op' (A op C)" results in simplifications. 394 Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1); 395 Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op' 396 397 // Do "A op B" and "A op C" both simplify? 398 if (Value *L = SimplifyBinOp(TopLevelOpcode, A, B, TD)) 399 if (Value *R = SimplifyBinOp(TopLevelOpcode, A, C, TD)) { 400 // They do! Return "L op' R". 401 ++NumExpand; 402 // If "L op' R" equals "B op' C" then "L op' R" is just the RHS. 403 if ((L == B && R == C) || 404 (Instruction::isCommutative(InnerOpcode) && L == C && R == B)) 405 return Op1; 406 // Otherwise return "L op' R" if it simplifies. 407 if (Value *V = SimplifyBinOp(InnerOpcode, L, R, TD)) 408 return V; 409 // Otherwise, create a new instruction. 410 A = Builder->CreateBinOp(InnerOpcode, L, R); 411 A->takeName(&I); 412 return A; 413 } 414 } 415 416 return 0; 417} 418 419// dyn_castNegVal - Given a 'sub' instruction, return the RHS of the instruction 420// if the LHS is a constant zero (which is the 'negate' form). 421// 422Value *InstCombiner::dyn_castNegVal(Value *V) const { 423 if (BinaryOperator::isNeg(V)) 424 return BinaryOperator::getNegArgument(V); 425 426 // Constants can be considered to be negated values if they can be folded. 427 if (ConstantInt *C = dyn_cast<ConstantInt>(V)) 428 return ConstantExpr::getNeg(C); 429 430 if (ConstantVector *C = dyn_cast<ConstantVector>(V)) 431 if (C->getType()->getElementType()->isIntegerTy()) 432 return ConstantExpr::getNeg(C); 433 434 return 0; 435} 436 437// dyn_castFNegVal - Given a 'fsub' instruction, return the RHS of the 438// instruction if the LHS is a constant negative zero (which is the 'negate' 439// form). 440// 441Value *InstCombiner::dyn_castFNegVal(Value *V) const { 442 if (BinaryOperator::isFNeg(V)) 443 return BinaryOperator::getFNegArgument(V); 444 445 // Constants can be considered to be negated values if they can be folded. 446 if (ConstantFP *C = dyn_cast<ConstantFP>(V)) 447 return ConstantExpr::getFNeg(C); 448 449 if (ConstantVector *C = dyn_cast<ConstantVector>(V)) 450 if (C->getType()->getElementType()->isFloatingPointTy()) 451 return ConstantExpr::getFNeg(C); 452 453 return 0; 454} 455 456static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO, 457 InstCombiner *IC) { 458 if (CastInst *CI = dyn_cast<CastInst>(&I)) 459 return IC->Builder->CreateCast(CI->getOpcode(), SO, I.getType()); 460 461 // Figure out if the constant is the left or the right argument. 462 bool ConstIsRHS = isa<Constant>(I.getOperand(1)); 463 Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS)); 464 465 if (Constant *SOC = dyn_cast<Constant>(SO)) { 466 if (ConstIsRHS) 467 return ConstantExpr::get(I.getOpcode(), SOC, ConstOperand); 468 return ConstantExpr::get(I.getOpcode(), ConstOperand, SOC); 469 } 470 471 Value *Op0 = SO, *Op1 = ConstOperand; 472 if (!ConstIsRHS) 473 std::swap(Op0, Op1); 474 475 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I)) 476 return IC->Builder->CreateBinOp(BO->getOpcode(), Op0, Op1, 477 SO->getName()+".op"); 478 if (ICmpInst *CI = dyn_cast<ICmpInst>(&I)) 479 return IC->Builder->CreateICmp(CI->getPredicate(), Op0, Op1, 480 SO->getName()+".cmp"); 481 if (FCmpInst *CI = dyn_cast<FCmpInst>(&I)) 482 return IC->Builder->CreateICmp(CI->getPredicate(), Op0, Op1, 483 SO->getName()+".cmp"); 484 llvm_unreachable("Unknown binary instruction type!"); 485} 486 487// FoldOpIntoSelect - Given an instruction with a select as one operand and a 488// constant as the other operand, try to fold the binary operator into the 489// select arguments. This also works for Cast instructions, which obviously do 490// not have a second operand. 491Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) { 492 // Don't modify shared select instructions 493 if (!SI->hasOneUse()) return 0; 494 Value *TV = SI->getOperand(1); 495 Value *FV = SI->getOperand(2); 496 497 if (isa<Constant>(TV) || isa<Constant>(FV)) { 498 // Bool selects with constant operands can be folded to logical ops. 499 if (SI->getType()->isIntegerTy(1)) return 0; 500 501 Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, this); 502 Value *SelectFalseVal = FoldOperationIntoSelectOperand(Op, FV, this); 503 504 return SelectInst::Create(SI->getCondition(), SelectTrueVal, 505 SelectFalseVal); 506 } 507 return 0; 508} 509 510 511/// FoldOpIntoPhi - Given a binary operator, cast instruction, or select which 512/// has a PHI node as operand #0, see if we can fold the instruction into the 513/// PHI (which is only possible if all operands to the PHI are constants). 514/// 515/// If AllowAggressive is true, FoldOpIntoPhi will allow certain transforms 516/// that would normally be unprofitable because they strongly encourage jump 517/// threading. 518Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I, 519 bool AllowAggressive) { 520 AllowAggressive = false; 521 PHINode *PN = cast<PHINode>(I.getOperand(0)); 522 unsigned NumPHIValues = PN->getNumIncomingValues(); 523 if (NumPHIValues == 0) 524 return 0; 525 526 // We normally only transform phis with a single use, unless we're trying 527 // hard to make jump threading happen. 528 if (!PN->hasOneUse() && !AllowAggressive) 529 return 0; 530 531 // Check to see if all of the operands of the PHI are simple constants 532 // (constantint/constantfp/undef). If there is one non-constant value, 533 // remember the BB it is in. If there is more than one or if *it* is a PHI, 534 // bail out. We don't do arbitrary constant expressions here because moving 535 // their computation can be expensive without a cost model. 536 BasicBlock *NonConstBB = 0; 537 for (unsigned i = 0; i != NumPHIValues; ++i) { 538 Value *InVal = PN->getIncomingValue(i); 539 if (isa<Constant>(InVal) && !isa<ConstantExpr>(InVal)) 540 continue; 541 542 if (isa<PHINode>(InVal)) return 0; // Itself a phi. 543 if (NonConstBB) return 0; // More than one non-const value. 544 545 NonConstBB = PN->getIncomingBlock(i); 546 547 // If the InVal is an invoke at the end of the pred block, then we can't 548 // insert a computation after it without breaking the edge. 549 if (InvokeInst *II = dyn_cast<InvokeInst>(InVal)) 550 if (II->getParent() == NonConstBB) 551 return 0; 552 553 // If the incoming non-constant value is in I's block, we have an infinite 554 // loop. 555 if (NonConstBB == I.getParent()) 556 return 0; 557 } 558 559 // If there is exactly one non-constant value, we can insert a copy of the 560 // operation in that block. However, if this is a critical edge, we would be 561 // inserting the computation one some other paths (e.g. inside a loop). Only 562 // do this if the pred block is unconditionally branching into the phi block. 563 if (NonConstBB != 0 && !AllowAggressive) { 564 BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator()); 565 if (!BI || !BI->isUnconditional()) return 0; 566 } 567 568 // Okay, we can do the transformation: create the new PHI node. 569 PHINode *NewPN = PHINode::Create(I.getType(), ""); 570 NewPN->reserveOperandSpace(PN->getNumOperands()/2); 571 InsertNewInstBefore(NewPN, *PN); 572 NewPN->takeName(PN); 573 574 // If we are going to have to insert a new computation, do so right before the 575 // predecessors terminator. 576 if (NonConstBB) 577 Builder->SetInsertPoint(NonConstBB->getTerminator()); 578 579 // Next, add all of the operands to the PHI. 580 if (SelectInst *SI = dyn_cast<SelectInst>(&I)) { 581 // We only currently try to fold the condition of a select when it is a phi, 582 // not the true/false values. 583 Value *TrueV = SI->getTrueValue(); 584 Value *FalseV = SI->getFalseValue(); 585 BasicBlock *PhiTransBB = PN->getParent(); 586 for (unsigned i = 0; i != NumPHIValues; ++i) { 587 BasicBlock *ThisBB = PN->getIncomingBlock(i); 588 Value *TrueVInPred = TrueV->DoPHITranslation(PhiTransBB, ThisBB); 589 Value *FalseVInPred = FalseV->DoPHITranslation(PhiTransBB, ThisBB); 590 Value *InV = 0; 591 if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) 592 InV = InC->isNullValue() ? FalseVInPred : TrueVInPred; 593 else 594 InV = Builder->CreateSelect(PN->getIncomingValue(i), 595 TrueVInPred, FalseVInPred, "phitmp"); 596 NewPN->addIncoming(InV, ThisBB); 597 } 598 } else if (CmpInst *CI = dyn_cast<CmpInst>(&I)) { 599 Constant *C = cast<Constant>(I.getOperand(1)); 600 for (unsigned i = 0; i != NumPHIValues; ++i) { 601 Value *InV = 0; 602 if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) 603 InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C); 604 else if (isa<ICmpInst>(CI)) 605 InV = Builder->CreateICmp(CI->getPredicate(), PN->getIncomingValue(i), 606 C, "phitmp"); 607 else 608 InV = Builder->CreateFCmp(CI->getPredicate(), PN->getIncomingValue(i), 609 C, "phitmp"); 610 NewPN->addIncoming(InV, PN->getIncomingBlock(i)); 611 } 612 } else if (I.getNumOperands() == 2) { 613 Constant *C = cast<Constant>(I.getOperand(1)); 614 for (unsigned i = 0; i != NumPHIValues; ++i) { 615 Value *InV = 0; 616 if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) 617 InV = ConstantExpr::get(I.getOpcode(), InC, C); 618 else 619 InV = Builder->CreateBinOp(cast<BinaryOperator>(I).getOpcode(), 620 PN->getIncomingValue(i), C, "phitmp"); 621 NewPN->addIncoming(InV, PN->getIncomingBlock(i)); 622 } 623 } else { 624 CastInst *CI = cast<CastInst>(&I); 625 const Type *RetTy = CI->getType(); 626 for (unsigned i = 0; i != NumPHIValues; ++i) { 627 Value *InV; 628 if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) 629 InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy); 630 else 631 InV = Builder->CreateCast(CI->getOpcode(), 632 PN->getIncomingValue(i), I.getType(), "phitmp"); 633 NewPN->addIncoming(InV, PN->getIncomingBlock(i)); 634 } 635 } 636 return ReplaceInstUsesWith(I, NewPN); 637} 638 639/// FindElementAtOffset - Given a type and a constant offset, determine whether 640/// or not there is a sequence of GEP indices into the type that will land us at 641/// the specified offset. If so, fill them into NewIndices and return the 642/// resultant element type, otherwise return null. 643const Type *InstCombiner::FindElementAtOffset(const Type *Ty, int64_t Offset, 644 SmallVectorImpl<Value*> &NewIndices) { 645 if (!TD) return 0; 646 if (!Ty->isSized()) return 0; 647 648 // Start with the index over the outer type. Note that the type size 649 // might be zero (even if the offset isn't zero) if the indexed type 650 // is something like [0 x {int, int}] 651 const Type *IntPtrTy = TD->getIntPtrType(Ty->getContext()); 652 int64_t FirstIdx = 0; 653 if (int64_t TySize = TD->getTypeAllocSize(Ty)) { 654 FirstIdx = Offset/TySize; 655 Offset -= FirstIdx*TySize; 656 657 // Handle hosts where % returns negative instead of values [0..TySize). 658 if (Offset < 0) { 659 --FirstIdx; 660 Offset += TySize; 661 assert(Offset >= 0); 662 } 663 assert((uint64_t)Offset < (uint64_t)TySize && "Out of range offset"); 664 } 665 666 NewIndices.push_back(ConstantInt::get(IntPtrTy, FirstIdx)); 667 668 // Index into the types. If we fail, set OrigBase to null. 669 while (Offset) { 670 // Indexing into tail padding between struct/array elements. 671 if (uint64_t(Offset*8) >= TD->getTypeSizeInBits(Ty)) 672 return 0; 673 674 if (const StructType *STy = dyn_cast<StructType>(Ty)) { 675 const StructLayout *SL = TD->getStructLayout(STy); 676 assert(Offset < (int64_t)SL->getSizeInBytes() && 677 "Offset must stay within the indexed type"); 678 679 unsigned Elt = SL->getElementContainingOffset(Offset); 680 NewIndices.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()), 681 Elt)); 682 683 Offset -= SL->getElementOffset(Elt); 684 Ty = STy->getElementType(Elt); 685 } else if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) { 686 uint64_t EltSize = TD->getTypeAllocSize(AT->getElementType()); 687 assert(EltSize && "Cannot index into a zero-sized array"); 688 NewIndices.push_back(ConstantInt::get(IntPtrTy,Offset/EltSize)); 689 Offset %= EltSize; 690 Ty = AT->getElementType(); 691 } else { 692 // Otherwise, we can't index into the middle of this atomic type, bail. 693 return 0; 694 } 695 } 696 697 return Ty; 698} 699 700 701 702Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { 703 SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end()); 704 705 if (Value *V = SimplifyGEPInst(&Ops[0], Ops.size(), TD)) 706 return ReplaceInstUsesWith(GEP, V); 707 708 Value *PtrOp = GEP.getOperand(0); 709 710 // Eliminate unneeded casts for indices, and replace indices which displace 711 // by multiples of a zero size type with zero. 712 if (TD) { 713 bool MadeChange = false; 714 const Type *IntPtrTy = TD->getIntPtrType(GEP.getContext()); 715 716 gep_type_iterator GTI = gep_type_begin(GEP); 717 for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); 718 I != E; ++I, ++GTI) { 719 // Skip indices into struct types. 720 const SequentialType *SeqTy = dyn_cast<SequentialType>(*GTI); 721 if (!SeqTy) continue; 722 723 // If the element type has zero size then any index over it is equivalent 724 // to an index of zero, so replace it with zero if it is not zero already. 725 if (SeqTy->getElementType()->isSized() && 726 TD->getTypeAllocSize(SeqTy->getElementType()) == 0) 727 if (!isa<Constant>(*I) || !cast<Constant>(*I)->isNullValue()) { 728 *I = Constant::getNullValue(IntPtrTy); 729 MadeChange = true; 730 } 731 732 if ((*I)->getType() != IntPtrTy) { 733 // If we are using a wider index than needed for this platform, shrink 734 // it to what we need. If narrower, sign-extend it to what we need. 735 // This explicit cast can make subsequent optimizations more obvious. 736 *I = Builder->CreateIntCast(*I, IntPtrTy, true); 737 MadeChange = true; 738 } 739 } 740 if (MadeChange) return &GEP; 741 } 742 743 // Combine Indices - If the source pointer to this getelementptr instruction 744 // is a getelementptr instruction, combine the indices of the two 745 // getelementptr instructions into a single instruction. 746 // 747 if (GEPOperator *Src = dyn_cast<GEPOperator>(PtrOp)) { 748 // Note that if our source is a gep chain itself that we wait for that 749 // chain to be resolved before we perform this transformation. This 750 // avoids us creating a TON of code in some cases. 751 // 752 if (GetElementPtrInst *SrcGEP = 753 dyn_cast<GetElementPtrInst>(Src->getOperand(0))) 754 if (SrcGEP->getNumOperands() == 2) 755 return 0; // Wait until our source is folded to completion. 756 757 SmallVector<Value*, 8> Indices; 758 759 // Find out whether the last index in the source GEP is a sequential idx. 760 bool EndsWithSequential = false; 761 for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src); 762 I != E; ++I) 763 EndsWithSequential = !(*I)->isStructTy(); 764 765 // Can we combine the two pointer arithmetics offsets? 766 if (EndsWithSequential) { 767 // Replace: gep (gep %P, long B), long A, ... 768 // With: T = long A+B; gep %P, T, ... 769 // 770 Value *Sum; 771 Value *SO1 = Src->getOperand(Src->getNumOperands()-1); 772 Value *GO1 = GEP.getOperand(1); 773 if (SO1 == Constant::getNullValue(SO1->getType())) { 774 Sum = GO1; 775 } else if (GO1 == Constant::getNullValue(GO1->getType())) { 776 Sum = SO1; 777 } else { 778 // If they aren't the same type, then the input hasn't been processed 779 // by the loop above yet (which canonicalizes sequential index types to 780 // intptr_t). Just avoid transforming this until the input has been 781 // normalized. 782 if (SO1->getType() != GO1->getType()) 783 return 0; 784 Sum = Builder->CreateAdd(SO1, GO1, PtrOp->getName()+".sum"); 785 } 786 787 // Update the GEP in place if possible. 788 if (Src->getNumOperands() == 2) { 789 GEP.setOperand(0, Src->getOperand(0)); 790 GEP.setOperand(1, Sum); 791 return &GEP; 792 } 793 Indices.append(Src->op_begin()+1, Src->op_end()-1); 794 Indices.push_back(Sum); 795 Indices.append(GEP.op_begin()+2, GEP.op_end()); 796 } else if (isa<Constant>(*GEP.idx_begin()) && 797 cast<Constant>(*GEP.idx_begin())->isNullValue() && 798 Src->getNumOperands() != 1) { 799 // Otherwise we can do the fold if the first index of the GEP is a zero 800 Indices.append(Src->op_begin()+1, Src->op_end()); 801 Indices.append(GEP.idx_begin()+1, GEP.idx_end()); 802 } 803 804 if (!Indices.empty()) 805 return (GEP.isInBounds() && Src->isInBounds()) ? 806 GetElementPtrInst::CreateInBounds(Src->getOperand(0), Indices.begin(), 807 Indices.end(), GEP.getName()) : 808 GetElementPtrInst::Create(Src->getOperand(0), Indices.begin(), 809 Indices.end(), GEP.getName()); 810 } 811 812 // Handle gep(bitcast x) and gep(gep x, 0, 0, 0). 813 Value *StrippedPtr = PtrOp->stripPointerCasts(); 814 if (StrippedPtr != PtrOp) { 815 const PointerType *StrippedPtrTy =cast<PointerType>(StrippedPtr->getType()); 816 817 bool HasZeroPointerIndex = false; 818 if (ConstantInt *C = dyn_cast<ConstantInt>(GEP.getOperand(1))) 819 HasZeroPointerIndex = C->isZero(); 820 821 // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... 822 // into : GEP [10 x i8]* X, i32 0, ... 823 // 824 // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ... 825 // into : GEP i8* X, ... 826 // 827 // This occurs when the program declares an array extern like "int X[];" 828 if (HasZeroPointerIndex) { 829 const PointerType *CPTy = cast<PointerType>(PtrOp->getType()); 830 if (const ArrayType *CATy = 831 dyn_cast<ArrayType>(CPTy->getElementType())) { 832 // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ? 833 if (CATy->getElementType() == StrippedPtrTy->getElementType()) { 834 // -> GEP i8* X, ... 835 SmallVector<Value*, 8> Idx(GEP.idx_begin()+1, GEP.idx_end()); 836 GetElementPtrInst *Res = 837 GetElementPtrInst::Create(StrippedPtr, Idx.begin(), 838 Idx.end(), GEP.getName()); 839 Res->setIsInBounds(GEP.isInBounds()); 840 return Res; 841 } 842 843 if (const ArrayType *XATy = 844 dyn_cast<ArrayType>(StrippedPtrTy->getElementType())){ 845 // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ? 846 if (CATy->getElementType() == XATy->getElementType()) { 847 // -> GEP [10 x i8]* X, i32 0, ... 848 // At this point, we know that the cast source type is a pointer 849 // to an array of the same type as the destination pointer 850 // array. Because the array type is never stepped over (there 851 // is a leading zero) we can fold the cast into this GEP. 852 GEP.setOperand(0, StrippedPtr); 853 return &GEP; 854 } 855 } 856 } 857 } else if (GEP.getNumOperands() == 2) { 858 // Transform things like: 859 // %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V 860 // into: %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast 861 const Type *SrcElTy = StrippedPtrTy->getElementType(); 862 const Type *ResElTy=cast<PointerType>(PtrOp->getType())->getElementType(); 863 if (TD && SrcElTy->isArrayTy() && 864 TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType()) == 865 TD->getTypeAllocSize(ResElTy)) { 866 Value *Idx[2]; 867 Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP.getContext())); 868 Idx[1] = GEP.getOperand(1); 869 Value *NewGEP = GEP.isInBounds() ? 870 Builder->CreateInBoundsGEP(StrippedPtr, Idx, Idx + 2, GEP.getName()) : 871 Builder->CreateGEP(StrippedPtr, Idx, Idx + 2, GEP.getName()); 872 // V and GEP are both pointer types --> BitCast 873 return new BitCastInst(NewGEP, GEP.getType()); 874 } 875 876 // Transform things like: 877 // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp 878 // (where tmp = 8*tmp2) into: 879 // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast 880 881 if (TD && SrcElTy->isArrayTy() && ResElTy->isIntegerTy(8)) { 882 uint64_t ArrayEltSize = 883 TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType()); 884 885 // Check to see if "tmp" is a scale by a multiple of ArrayEltSize. We 886 // allow either a mul, shift, or constant here. 887 Value *NewIdx = 0; 888 ConstantInt *Scale = 0; 889 if (ArrayEltSize == 1) { 890 NewIdx = GEP.getOperand(1); 891 Scale = ConstantInt::get(cast<IntegerType>(NewIdx->getType()), 1); 892 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP.getOperand(1))) { 893 NewIdx = ConstantInt::get(CI->getType(), 1); 894 Scale = CI; 895 } else if (Instruction *Inst =dyn_cast<Instruction>(GEP.getOperand(1))){ 896 if (Inst->getOpcode() == Instruction::Shl && 897 isa<ConstantInt>(Inst->getOperand(1))) { 898 ConstantInt *ShAmt = cast<ConstantInt>(Inst->getOperand(1)); 899 uint32_t ShAmtVal = ShAmt->getLimitedValue(64); 900 Scale = ConstantInt::get(cast<IntegerType>(Inst->getType()), 901 1ULL << ShAmtVal); 902 NewIdx = Inst->getOperand(0); 903 } else if (Inst->getOpcode() == Instruction::Mul && 904 isa<ConstantInt>(Inst->getOperand(1))) { 905 Scale = cast<ConstantInt>(Inst->getOperand(1)); 906 NewIdx = Inst->getOperand(0); 907 } 908 } 909 910 // If the index will be to exactly the right offset with the scale taken 911 // out, perform the transformation. Note, we don't know whether Scale is 912 // signed or not. We'll use unsigned version of division/modulo 913 // operation after making sure Scale doesn't have the sign bit set. 914 if (ArrayEltSize && Scale && Scale->getSExtValue() >= 0LL && 915 Scale->getZExtValue() % ArrayEltSize == 0) { 916 Scale = ConstantInt::get(Scale->getType(), 917 Scale->getZExtValue() / ArrayEltSize); 918 if (Scale->getZExtValue() != 1) { 919 Constant *C = ConstantExpr::getIntegerCast(Scale, NewIdx->getType(), 920 false /*ZExt*/); 921 NewIdx = Builder->CreateMul(NewIdx, C, "idxscale"); 922 } 923 924 // Insert the new GEP instruction. 925 Value *Idx[2]; 926 Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP.getContext())); 927 Idx[1] = NewIdx; 928 Value *NewGEP = GEP.isInBounds() ? 929 Builder->CreateInBoundsGEP(StrippedPtr, Idx, Idx + 2,GEP.getName()): 930 Builder->CreateGEP(StrippedPtr, Idx, Idx + 2, GEP.getName()); 931 // The NewGEP must be pointer typed, so must the old one -> BitCast 932 return new BitCastInst(NewGEP, GEP.getType()); 933 } 934 } 935 } 936 } 937 938 /// See if we can simplify: 939 /// X = bitcast A* to B* 940 /// Y = gep X, <...constant indices...> 941 /// into a gep of the original struct. This is important for SROA and alias 942 /// analysis of unions. If "A" is also a bitcast, wait for A/X to be merged. 943 if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) { 944 if (TD && 945 !isa<BitCastInst>(BCI->getOperand(0)) && GEP.hasAllConstantIndices()) { 946 // Determine how much the GEP moves the pointer. We are guaranteed to get 947 // a constant back from EmitGEPOffset. 948 ConstantInt *OffsetV = cast<ConstantInt>(EmitGEPOffset(&GEP)); 949 int64_t Offset = OffsetV->getSExtValue(); 950 951 // If this GEP instruction doesn't move the pointer, just replace the GEP 952 // with a bitcast of the real input to the dest type. 953 if (Offset == 0) { 954 // If the bitcast is of an allocation, and the allocation will be 955 // converted to match the type of the cast, don't touch this. 956 if (isa<AllocaInst>(BCI->getOperand(0)) || 957 isMalloc(BCI->getOperand(0))) { 958 // See if the bitcast simplifies, if so, don't nuke this GEP yet. 959 if (Instruction *I = visitBitCast(*BCI)) { 960 if (I != BCI) { 961 I->takeName(BCI); 962 BCI->getParent()->getInstList().insert(BCI, I); 963 ReplaceInstUsesWith(*BCI, I); 964 } 965 return &GEP; 966 } 967 } 968 return new BitCastInst(BCI->getOperand(0), GEP.getType()); 969 } 970 971 // Otherwise, if the offset is non-zero, we need to find out if there is a 972 // field at Offset in 'A's type. If so, we can pull the cast through the 973 // GEP. 974 SmallVector<Value*, 8> NewIndices; 975 const Type *InTy = 976 cast<PointerType>(BCI->getOperand(0)->getType())->getElementType(); 977 if (FindElementAtOffset(InTy, Offset, NewIndices)) { 978 Value *NGEP = GEP.isInBounds() ? 979 Builder->CreateInBoundsGEP(BCI->getOperand(0), NewIndices.begin(), 980 NewIndices.end()) : 981 Builder->CreateGEP(BCI->getOperand(0), NewIndices.begin(), 982 NewIndices.end()); 983 984 if (NGEP->getType() == GEP.getType()) 985 return ReplaceInstUsesWith(GEP, NGEP); 986 NGEP->takeName(&GEP); 987 return new BitCastInst(NGEP, GEP.getType()); 988 } 989 } 990 } 991 992 return 0; 993} 994 995 996 997static bool IsOnlyNullComparedAndFreed(const Value &V) { 998 for (Value::const_use_iterator UI = V.use_begin(), UE = V.use_end(); 999 UI != UE; ++UI) { 1000 const User *U = *UI; 1001 if (isFreeCall(U)) 1002 continue; 1003 if (const ICmpInst *ICI = dyn_cast<ICmpInst>(U)) 1004 if (ICI->isEquality() && isa<ConstantPointerNull>(ICI->getOperand(1))) 1005 continue; 1006 return false; 1007 } 1008 return true; 1009} 1010 1011Instruction *InstCombiner::visitMalloc(Instruction &MI) { 1012 // If we have a malloc call which is only used in any amount of comparisons 1013 // to null and free calls, delete the calls and replace the comparisons with 1014 // true or false as appropriate. 1015 if (IsOnlyNullComparedAndFreed(MI)) { 1016 for (Value::use_iterator UI = MI.use_begin(), UE = MI.use_end(); 1017 UI != UE;) { 1018 // We can assume that every remaining use is a free call or an icmp eq/ne 1019 // to null, so the cast is safe. 1020 Instruction *I = cast<Instruction>(*UI); 1021 1022 // Early increment here, as we're about to get rid of the user. 1023 ++UI; 1024 1025 if (isFreeCall(I)) { 1026 EraseInstFromFunction(*cast<CallInst>(I)); 1027 continue; 1028 } 1029 // Again, the cast is safe. 1030 ICmpInst *C = cast<ICmpInst>(I); 1031 ReplaceInstUsesWith(*C, ConstantInt::get(Type::getInt1Ty(C->getContext()), 1032 C->isFalseWhenEqual())); 1033 EraseInstFromFunction(*C); 1034 } 1035 return EraseInstFromFunction(MI); 1036 } 1037 return 0; 1038} 1039 1040 1041 1042Instruction *InstCombiner::visitFree(CallInst &FI) { 1043 Value *Op = FI.getArgOperand(0); 1044 1045 // free undef -> unreachable. 1046 if (isa<UndefValue>(Op)) { 1047 // Insert a new store to null because we cannot modify the CFG here. 1048 new StoreInst(ConstantInt::getTrue(FI.getContext()), 1049 UndefValue::get(Type::getInt1PtrTy(FI.getContext())), &FI); 1050 return EraseInstFromFunction(FI); 1051 } 1052 1053 // If we have 'free null' delete the instruction. This can happen in stl code 1054 // when lots of inlining happens. 1055 if (isa<ConstantPointerNull>(Op)) 1056 return EraseInstFromFunction(FI); 1057 1058 return 0; 1059} 1060 1061 1062 1063Instruction *InstCombiner::visitBranchInst(BranchInst &BI) { 1064 // Change br (not X), label True, label False to: br X, label False, True 1065 Value *X = 0; 1066 BasicBlock *TrueDest; 1067 BasicBlock *FalseDest; 1068 if (match(&BI, m_Br(m_Not(m_Value(X)), TrueDest, FalseDest)) && 1069 !isa<Constant>(X)) { 1070 // Swap Destinations and condition... 1071 BI.setCondition(X); 1072 BI.setSuccessor(0, FalseDest); 1073 BI.setSuccessor(1, TrueDest); 1074 return &BI; 1075 } 1076 1077 // Cannonicalize fcmp_one -> fcmp_oeq 1078 FCmpInst::Predicate FPred; Value *Y; 1079 if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)), 1080 TrueDest, FalseDest)) && 1081 BI.getCondition()->hasOneUse()) 1082 if (FPred == FCmpInst::FCMP_ONE || FPred == FCmpInst::FCMP_OLE || 1083 FPred == FCmpInst::FCMP_OGE) { 1084 FCmpInst *Cond = cast<FCmpInst>(BI.getCondition()); 1085 Cond->setPredicate(FCmpInst::getInversePredicate(FPred)); 1086 1087 // Swap Destinations and condition. 1088 BI.setSuccessor(0, FalseDest); 1089 BI.setSuccessor(1, TrueDest); 1090 Worklist.Add(Cond); 1091 return &BI; 1092 } 1093 1094 // Cannonicalize icmp_ne -> icmp_eq 1095 ICmpInst::Predicate IPred; 1096 if (match(&BI, m_Br(m_ICmp(IPred, m_Value(X), m_Value(Y)), 1097 TrueDest, FalseDest)) && 1098 BI.getCondition()->hasOneUse()) 1099 if (IPred == ICmpInst::ICMP_NE || IPred == ICmpInst::ICMP_ULE || 1100 IPred == ICmpInst::ICMP_SLE || IPred == ICmpInst::ICMP_UGE || 1101 IPred == ICmpInst::ICMP_SGE) { 1102 ICmpInst *Cond = cast<ICmpInst>(BI.getCondition()); 1103 Cond->setPredicate(ICmpInst::getInversePredicate(IPred)); 1104 // Swap Destinations and condition. 1105 BI.setSuccessor(0, FalseDest); 1106 BI.setSuccessor(1, TrueDest); 1107 Worklist.Add(Cond); 1108 return &BI; 1109 } 1110 1111 return 0; 1112} 1113 1114Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) { 1115 Value *Cond = SI.getCondition(); 1116 if (Instruction *I = dyn_cast<Instruction>(Cond)) { 1117 if (I->getOpcode() == Instruction::Add) 1118 if (ConstantInt *AddRHS = dyn_cast<ConstantInt>(I->getOperand(1))) { 1119 // change 'switch (X+4) case 1:' into 'switch (X) case -3' 1120 for (unsigned i = 2, e = SI.getNumOperands(); i != e; i += 2) 1121 SI.setOperand(i, 1122 ConstantExpr::getSub(cast<Constant>(SI.getOperand(i)), 1123 AddRHS)); 1124 SI.setOperand(0, I->getOperand(0)); 1125 Worklist.Add(I); 1126 return &SI; 1127 } 1128 } 1129 return 0; 1130} 1131 1132Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) { 1133 Value *Agg = EV.getAggregateOperand(); 1134 1135 if (!EV.hasIndices()) 1136 return ReplaceInstUsesWith(EV, Agg); 1137 1138 if (Constant *C = dyn_cast<Constant>(Agg)) { 1139 if (isa<UndefValue>(C)) 1140 return ReplaceInstUsesWith(EV, UndefValue::get(EV.getType())); 1141 1142 if (isa<ConstantAggregateZero>(C)) 1143 return ReplaceInstUsesWith(EV, Constant::getNullValue(EV.getType())); 1144 1145 if (isa<ConstantArray>(C) || isa<ConstantStruct>(C)) { 1146 // Extract the element indexed by the first index out of the constant 1147 Value *V = C->getOperand(*EV.idx_begin()); 1148 if (EV.getNumIndices() > 1) 1149 // Extract the remaining indices out of the constant indexed by the 1150 // first index 1151 return ExtractValueInst::Create(V, EV.idx_begin() + 1, EV.idx_end()); 1152 else 1153 return ReplaceInstUsesWith(EV, V); 1154 } 1155 return 0; // Can't handle other constants 1156 } 1157 if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) { 1158 // We're extracting from an insertvalue instruction, compare the indices 1159 const unsigned *exti, *exte, *insi, *inse; 1160 for (exti = EV.idx_begin(), insi = IV->idx_begin(), 1161 exte = EV.idx_end(), inse = IV->idx_end(); 1162 exti != exte && insi != inse; 1163 ++exti, ++insi) { 1164 if (*insi != *exti) 1165 // The insert and extract both reference distinctly different elements. 1166 // This means the extract is not influenced by the insert, and we can 1167 // replace the aggregate operand of the extract with the aggregate 1168 // operand of the insert. i.e., replace 1169 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1 1170 // %E = extractvalue { i32, { i32 } } %I, 0 1171 // with 1172 // %E = extractvalue { i32, { i32 } } %A, 0 1173 return ExtractValueInst::Create(IV->getAggregateOperand(), 1174 EV.idx_begin(), EV.idx_end()); 1175 } 1176 if (exti == exte && insi == inse) 1177 // Both iterators are at the end: Index lists are identical. Replace 1178 // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0 1179 // %C = extractvalue { i32, { i32 } } %B, 1, 0 1180 // with "i32 42" 1181 return ReplaceInstUsesWith(EV, IV->getInsertedValueOperand()); 1182 if (exti == exte) { 1183 // The extract list is a prefix of the insert list. i.e. replace 1184 // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0 1185 // %E = extractvalue { i32, { i32 } } %I, 1 1186 // with 1187 // %X = extractvalue { i32, { i32 } } %A, 1 1188 // %E = insertvalue { i32 } %X, i32 42, 0 1189 // by switching the order of the insert and extract (though the 1190 // insertvalue should be left in, since it may have other uses). 1191 Value *NewEV = Builder->CreateExtractValue(IV->getAggregateOperand(), 1192 EV.idx_begin(), EV.idx_end()); 1193 return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(), 1194 insi, inse); 1195 } 1196 if (insi == inse) 1197 // The insert list is a prefix of the extract list 1198 // We can simply remove the common indices from the extract and make it 1199 // operate on the inserted value instead of the insertvalue result. 1200 // i.e., replace 1201 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1 1202 // %E = extractvalue { i32, { i32 } } %I, 1, 0 1203 // with 1204 // %E extractvalue { i32 } { i32 42 }, 0 1205 return ExtractValueInst::Create(IV->getInsertedValueOperand(), 1206 exti, exte); 1207 } 1208 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Agg)) { 1209 // We're extracting from an intrinsic, see if we're the only user, which 1210 // allows us to simplify multiple result intrinsics to simpler things that 1211 // just get one value. 1212 if (II->hasOneUse()) { 1213 // Check if we're grabbing the overflow bit or the result of a 'with 1214 // overflow' intrinsic. If it's the latter we can remove the intrinsic 1215 // and replace it with a traditional binary instruction. 1216 switch (II->getIntrinsicID()) { 1217 case Intrinsic::uadd_with_overflow: 1218 case Intrinsic::sadd_with_overflow: 1219 if (*EV.idx_begin() == 0) { // Normal result. 1220 Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1); 1221 II->replaceAllUsesWith(UndefValue::get(II->getType())); 1222 EraseInstFromFunction(*II); 1223 return BinaryOperator::CreateAdd(LHS, RHS); 1224 } 1225 1226 // If the normal result of the add is dead, and the RHS is a constant, 1227 // we can transform this into a range comparison. 1228 // overflow = uadd a, -4 --> overflow = icmp ugt a, 3 1229 if (II->getIntrinsicID() == Intrinsic::uadd_with_overflow) 1230 if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getArgOperand(1))) 1231 return new ICmpInst(ICmpInst::ICMP_UGT, II->getArgOperand(0), 1232 ConstantExpr::getNot(CI)); 1233 break; 1234 case Intrinsic::usub_with_overflow: 1235 case Intrinsic::ssub_with_overflow: 1236 if (*EV.idx_begin() == 0) { // Normal result. 1237 Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1); 1238 II->replaceAllUsesWith(UndefValue::get(II->getType())); 1239 EraseInstFromFunction(*II); 1240 return BinaryOperator::CreateSub(LHS, RHS); 1241 } 1242 break; 1243 case Intrinsic::umul_with_overflow: 1244 case Intrinsic::smul_with_overflow: 1245 if (*EV.idx_begin() == 0) { // Normal result. 1246 Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1); 1247 II->replaceAllUsesWith(UndefValue::get(II->getType())); 1248 EraseInstFromFunction(*II); 1249 return BinaryOperator::CreateMul(LHS, RHS); 1250 } 1251 break; 1252 default: 1253 break; 1254 } 1255 } 1256 } 1257 if (LoadInst *L = dyn_cast<LoadInst>(Agg)) 1258 // If the (non-volatile) load only has one use, we can rewrite this to a 1259 // load from a GEP. This reduces the size of the load. 1260 // FIXME: If a load is used only by extractvalue instructions then this 1261 // could be done regardless of having multiple uses. 1262 if (!L->isVolatile() && L->hasOneUse()) { 1263 // extractvalue has integer indices, getelementptr has Value*s. Convert. 1264 SmallVector<Value*, 4> Indices; 1265 // Prefix an i32 0 since we need the first element. 1266 Indices.push_back(Builder->getInt32(0)); 1267 for (ExtractValueInst::idx_iterator I = EV.idx_begin(), E = EV.idx_end(); 1268 I != E; ++I) 1269 Indices.push_back(Builder->getInt32(*I)); 1270 1271 // We need to insert these at the location of the old load, not at that of 1272 // the extractvalue. 1273 Builder->SetInsertPoint(L->getParent(), L); 1274 Value *GEP = Builder->CreateInBoundsGEP(L->getPointerOperand(), 1275 Indices.begin(), Indices.end()); 1276 // Returning the load directly will cause the main loop to insert it in 1277 // the wrong spot, so use ReplaceInstUsesWith(). 1278 return ReplaceInstUsesWith(EV, Builder->CreateLoad(GEP)); 1279 } 1280 // We could simplify extracts from other values. Note that nested extracts may 1281 // already be simplified implicitly by the above: extract (extract (insert) ) 1282 // will be translated into extract ( insert ( extract ) ) first and then just 1283 // the value inserted, if appropriate. Similarly for extracts from single-use 1284 // loads: extract (extract (load)) will be translated to extract (load (gep)) 1285 // and if again single-use then via load (gep (gep)) to load (gep). 1286 // However, double extracts from e.g. function arguments or return values 1287 // aren't handled yet. 1288 return 0; 1289} 1290 1291 1292 1293 1294/// TryToSinkInstruction - Try to move the specified instruction from its 1295/// current block into the beginning of DestBlock, which can only happen if it's 1296/// safe to move the instruction past all of the instructions between it and the 1297/// end of its block. 1298static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) { 1299 assert(I->hasOneUse() && "Invariants didn't hold!"); 1300 1301 // Cannot move control-flow-involving, volatile loads, vaarg, etc. 1302 if (isa<PHINode>(I) || I->mayHaveSideEffects() || isa<TerminatorInst>(I)) 1303 return false; 1304 1305 // Do not sink alloca instructions out of the entry block. 1306 if (isa<AllocaInst>(I) && I->getParent() == 1307 &DestBlock->getParent()->getEntryBlock()) 1308 return false; 1309 1310 // We can only sink load instructions if there is nothing between the load and 1311 // the end of block that could change the value. 1312 if (I->mayReadFromMemory()) { 1313 for (BasicBlock::iterator Scan = I, E = I->getParent()->end(); 1314 Scan != E; ++Scan) 1315 if (Scan->mayWriteToMemory()) 1316 return false; 1317 } 1318 1319 BasicBlock::iterator InsertPos = DestBlock->getFirstNonPHI(); 1320 1321 I->moveBefore(InsertPos); 1322 ++NumSunkInst; 1323 return true; 1324} 1325 1326 1327/// AddReachableCodeToWorklist - Walk the function in depth-first order, adding 1328/// all reachable code to the worklist. 1329/// 1330/// This has a couple of tricks to make the code faster and more powerful. In 1331/// particular, we constant fold and DCE instructions as we go, to avoid adding 1332/// them to the worklist (this significantly speeds up instcombine on code where 1333/// many instructions are dead or constant). Additionally, if we find a branch 1334/// whose condition is a known constant, we only visit the reachable successors. 1335/// 1336static bool AddReachableCodeToWorklist(BasicBlock *BB, 1337 SmallPtrSet<BasicBlock*, 64> &Visited, 1338 InstCombiner &IC, 1339 const TargetData *TD) { 1340 bool MadeIRChange = false; 1341 SmallVector<BasicBlock*, 256> Worklist; 1342 Worklist.push_back(BB); 1343 1344 SmallVector<Instruction*, 128> InstrsForInstCombineWorklist; 1345 SmallPtrSet<ConstantExpr*, 64> FoldedConstants; 1346 1347 do { 1348 BB = Worklist.pop_back_val(); 1349 1350 // We have now visited this block! If we've already been here, ignore it. 1351 if (!Visited.insert(BB)) continue; 1352 1353 for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) { 1354 Instruction *Inst = BBI++; 1355 1356 // DCE instruction if trivially dead. 1357 if (isInstructionTriviallyDead(Inst)) { 1358 ++NumDeadInst; 1359 DEBUG(errs() << "IC: DCE: " << *Inst << '\n'); 1360 Inst->eraseFromParent(); 1361 continue; 1362 } 1363 1364 // ConstantProp instruction if trivially constant. 1365 if (!Inst->use_empty() && isa<Constant>(Inst->getOperand(0))) 1366 if (Constant *C = ConstantFoldInstruction(Inst, TD)) { 1367 DEBUG(errs() << "IC: ConstFold to: " << *C << " from: " 1368 << *Inst << '\n'); 1369 Inst->replaceAllUsesWith(C); 1370 ++NumConstProp; 1371 Inst->eraseFromParent(); 1372 continue; 1373 } 1374 1375 if (TD) { 1376 // See if we can constant fold its operands. 1377 for (User::op_iterator i = Inst->op_begin(), e = Inst->op_end(); 1378 i != e; ++i) { 1379 ConstantExpr *CE = dyn_cast<ConstantExpr>(i); 1380 if (CE == 0) continue; 1381 1382 // If we already folded this constant, don't try again. 1383 if (!FoldedConstants.insert(CE)) 1384 continue; 1385 1386 Constant *NewC = ConstantFoldConstantExpression(CE, TD); 1387 if (NewC && NewC != CE) { 1388 *i = NewC; 1389 MadeIRChange = true; 1390 } 1391 } 1392 } 1393 1394 InstrsForInstCombineWorklist.push_back(Inst); 1395 } 1396 1397 // Recursively visit successors. If this is a branch or switch on a 1398 // constant, only visit the reachable successor. 1399 TerminatorInst *TI = BB->getTerminator(); 1400 if (BranchInst *BI = dyn_cast<BranchInst>(TI)) { 1401 if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) { 1402 bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue(); 1403 BasicBlock *ReachableBB = BI->getSuccessor(!CondVal); 1404 Worklist.push_back(ReachableBB); 1405 continue; 1406 } 1407 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) { 1408 if (ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition())) { 1409 // See if this is an explicit destination. 1410 for (unsigned i = 1, e = SI->getNumSuccessors(); i != e; ++i) 1411 if (SI->getCaseValue(i) == Cond) { 1412 BasicBlock *ReachableBB = SI->getSuccessor(i); 1413 Worklist.push_back(ReachableBB); 1414 continue; 1415 } 1416 1417 // Otherwise it is the default destination. 1418 Worklist.push_back(SI->getSuccessor(0)); 1419 continue; 1420 } 1421 } 1422 1423 for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) 1424 Worklist.push_back(TI->getSuccessor(i)); 1425 } while (!Worklist.empty()); 1426 1427 // Once we've found all of the instructions to add to instcombine's worklist, 1428 // add them in reverse order. This way instcombine will visit from the top 1429 // of the function down. This jives well with the way that it adds all uses 1430 // of instructions to the worklist after doing a transformation, thus avoiding 1431 // some N^2 behavior in pathological cases. 1432 IC.Worklist.AddInitialGroup(&InstrsForInstCombineWorklist[0], 1433 InstrsForInstCombineWorklist.size()); 1434 1435 return MadeIRChange; 1436} 1437 1438bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) { 1439 MadeIRChange = false; 1440 1441 DEBUG(errs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on " 1442 << F.getNameStr() << "\n"); 1443 1444 { 1445 // Do a depth-first traversal of the function, populate the worklist with 1446 // the reachable instructions. Ignore blocks that are not reachable. Keep 1447 // track of which blocks we visit. 1448 SmallPtrSet<BasicBlock*, 64> Visited; 1449 MadeIRChange |= AddReachableCodeToWorklist(F.begin(), Visited, *this, TD); 1450 1451 // Do a quick scan over the function. If we find any blocks that are 1452 // unreachable, remove any instructions inside of them. This prevents 1453 // the instcombine code from having to deal with some bad special cases. 1454 for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) 1455 if (!Visited.count(BB)) { 1456 Instruction *Term = BB->getTerminator(); 1457 while (Term != BB->begin()) { // Remove instrs bottom-up 1458 BasicBlock::iterator I = Term; --I; 1459 1460 DEBUG(errs() << "IC: DCE: " << *I << '\n'); 1461 // A debug intrinsic shouldn't force another iteration if we weren't 1462 // going to do one without it. 1463 if (!isa<DbgInfoIntrinsic>(I)) { 1464 ++NumDeadInst; 1465 MadeIRChange = true; 1466 } 1467 1468 // If I is not void type then replaceAllUsesWith undef. 1469 // This allows ValueHandlers and custom metadata to adjust itself. 1470 if (!I->getType()->isVoidTy()) 1471 I->replaceAllUsesWith(UndefValue::get(I->getType())); 1472 I->eraseFromParent(); 1473 } 1474 } 1475 } 1476 1477 while (!Worklist.isEmpty()) { 1478 Instruction *I = Worklist.RemoveOne(); 1479 if (I == 0) continue; // skip null values. 1480 1481 // Check to see if we can DCE the instruction. 1482 if (isInstructionTriviallyDead(I)) { 1483 DEBUG(errs() << "IC: DCE: " << *I << '\n'); 1484 EraseInstFromFunction(*I); 1485 ++NumDeadInst; 1486 MadeIRChange = true; 1487 continue; 1488 } 1489 1490 // Instruction isn't dead, see if we can constant propagate it. 1491 if (!I->use_empty() && isa<Constant>(I->getOperand(0))) 1492 if (Constant *C = ConstantFoldInstruction(I, TD)) { 1493 DEBUG(errs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n'); 1494 1495 // Add operands to the worklist. 1496 ReplaceInstUsesWith(*I, C); 1497 ++NumConstProp; 1498 EraseInstFromFunction(*I); 1499 MadeIRChange = true; 1500 continue; 1501 } 1502 1503 // See if we can trivially sink this instruction to a successor basic block. 1504 if (I->hasOneUse()) { 1505 BasicBlock *BB = I->getParent(); 1506 Instruction *UserInst = cast<Instruction>(I->use_back()); 1507 BasicBlock *UserParent; 1508 1509 // Get the block the use occurs in. 1510 if (PHINode *PN = dyn_cast<PHINode>(UserInst)) 1511 UserParent = PN->getIncomingBlock(I->use_begin().getUse()); 1512 else 1513 UserParent = UserInst->getParent(); 1514 1515 if (UserParent != BB) { 1516 bool UserIsSuccessor = false; 1517 // See if the user is one of our successors. 1518 for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI) 1519 if (*SI == UserParent) { 1520 UserIsSuccessor = true; 1521 break; 1522 } 1523 1524 // If the user is one of our immediate successors, and if that successor 1525 // only has us as a predecessors (we'd have to split the critical edge 1526 // otherwise), we can keep going. 1527 if (UserIsSuccessor && UserParent->getSinglePredecessor()) 1528 // Okay, the CFG is simple enough, try to sink this instruction. 1529 MadeIRChange |= TryToSinkInstruction(I, UserParent); 1530 } 1531 } 1532 1533 // Now that we have an instruction, try combining it to simplify it. 1534 Builder->SetInsertPoint(I->getParent(), I); 1535 1536#ifndef NDEBUG 1537 std::string OrigI; 1538#endif 1539 DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str();); 1540 DEBUG(errs() << "IC: Visiting: " << OrigI << '\n'); 1541 1542 if (Instruction *Result = visit(*I)) { 1543 ++NumCombined; 1544 // Should we replace the old instruction with a new one? 1545 if (Result != I) { 1546 DEBUG(errs() << "IC: Old = " << *I << '\n' 1547 << " New = " << *Result << '\n'); 1548 1549 // Everything uses the new instruction now. 1550 I->replaceAllUsesWith(Result); 1551 1552 // Push the new instruction and any users onto the worklist. 1553 Worklist.Add(Result); 1554 Worklist.AddUsersToWorkList(*Result); 1555 1556 // Move the name to the new instruction first. 1557 Result->takeName(I); 1558 1559 // Insert the new instruction into the basic block... 1560 BasicBlock *InstParent = I->getParent(); 1561 BasicBlock::iterator InsertPos = I; 1562 1563 if (!isa<PHINode>(Result)) // If combining a PHI, don't insert 1564 while (isa<PHINode>(InsertPos)) // middle of a block of PHIs. 1565 ++InsertPos; 1566 1567 InstParent->getInstList().insert(InsertPos, Result); 1568 1569 EraseInstFromFunction(*I); 1570 } else { 1571#ifndef NDEBUG 1572 DEBUG(errs() << "IC: Mod = " << OrigI << '\n' 1573 << " New = " << *I << '\n'); 1574#endif 1575 1576 // If the instruction was modified, it's possible that it is now dead. 1577 // if so, remove it. 1578 if (isInstructionTriviallyDead(I)) { 1579 EraseInstFromFunction(*I); 1580 } else { 1581 Worklist.Add(I); 1582 Worklist.AddUsersToWorkList(*I); 1583 } 1584 } 1585 MadeIRChange = true; 1586 } 1587 } 1588 1589 Worklist.Zap(); 1590 return MadeIRChange; 1591} 1592 1593 1594bool InstCombiner::runOnFunction(Function &F) { 1595 MustPreserveLCSSA = mustPreserveAnalysisID(LCSSAID); 1596 TD = getAnalysisIfAvailable<TargetData>(); 1597 1598 1599 /// Builder - This is an IRBuilder that automatically inserts new 1600 /// instructions into the worklist when they are created. 1601 IRBuilder<true, TargetFolder, InstCombineIRInserter> 1602 TheBuilder(F.getContext(), TargetFolder(TD), 1603 InstCombineIRInserter(Worklist)); 1604 Builder = &TheBuilder; 1605 1606 bool EverMadeChange = false; 1607 1608 // Iterate while there is work to do. 1609 unsigned Iteration = 0; 1610 while (DoOneIteration(F, Iteration++)) 1611 EverMadeChange = true; 1612 1613 Builder = 0; 1614 return EverMadeChange; 1615} 1616 1617FunctionPass *llvm::createInstructionCombiningPass() { 1618 return new InstCombiner(); 1619} 1620