InstCombineCalls.cpp revision cb348b9b45025393ec5b28eac8bb6773a9b603f6
//===- InstCombineCalls.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall and visitInvoke functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

/// getPromotedType - Return the specified type promoted as it would be to pass
/// through a va_arg area.
static Type *getPromotedType(Type *Ty) {
  if (IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}


Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
  unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), TD);
  unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), TD);
  unsigned MinAlign = std::min(DstAlign, SrcAlign);
  unsigned CopyAlign = MI->getAlignment();

  if (CopyAlign < MinAlign) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      MinAlign, false));
    return MI;
  }

  // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
  // load/store.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getArgOperand(2));
  if (MemOpLength == 0) return 0;

  // Source and destination pointer types are always "i8*" for intrinsic.  See
  // if the size is something we can handle with a single primitive load/store.
  // A single load+store correctly handles overlapping memory in the memmove
  // case.
  unsigned Size = MemOpLength->getZExtValue();
  if (Size == 0) return MI;  // Delete this mem transfer.

  if (Size > 8 || (Size&(Size-1)))
    return 0;  // If not 1/2/4/8 bytes, exit.

  // Use an integer load+store unless we can find something better.
  unsigned SrcAddrSp =
    cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
    cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();

  IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);

  // Memcpy forces the use of i8* for the source and destination.  That means
  // that if you're using memcpy to move one double around, you'll get a cast
  // from double* to i8*.  We'd much rather use a double load+store than an
  // i64 load+store here, because this improves the odds that the source or
  // dest address will be promotable.  See if we can find a better type than
  // the integer datatype.
  Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
  if (StrippedDest != MI->getArgOperand(0)) {
    Type *SrcETy = cast<PointerType>(StrippedDest->getType())
                                    ->getElementType();
    if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
      // The SrcETy might be something like {{{double}}} or [1 x double].  Rip
      // down through these levels if so.
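      // For example, the single-element struct {{{double}}} and the
      // one-element array [1 x double] both strip down to a plain double.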
      while (!SrcETy->isSingleValueType()) {
        if (StructType *STy = dyn_cast<StructType>(SrcETy)) {
          if (STy->getNumElements() == 1)
            SrcETy = STy->getElementType(0);
          else
            break;
        } else if (ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
          if (ATy->getNumElements() == 1)
            SrcETy = ATy->getElementType();
          else
            break;
        } else
          break;
      }

      if (SrcETy->isSingleValueType()) {
        NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp);
        NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp);
      }
    }
  }


  // If the memcpy/memmove provides better alignment info than we can
  // infer, use it.
  SrcAlign = std::max(SrcAlign, CopyAlign);
  DstAlign = std::max(DstAlign, CopyAlign);

  Value *Src = Builder->CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
  Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
  LoadInst *L = Builder->CreateLoad(Src, MI->isVolatile());
  L->setAlignment(SrcAlign);
  StoreInst *S = Builder->CreateStore(L, Dest, MI->isVolatile());
  S->setAlignment(DstAlign);

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setArgOperand(2, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}

Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
  unsigned Alignment = getKnownAlignment(MI->getDest(), TD);
  if (MI->getAlignment() < Alignment) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      Alignment, false));
    return MI;
  }

  // Extract the length and alignment and fill if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return 0;
  uint64_t Len = LenC->getZExtValue();
  Alignment = MI->getAlignment();

  // If the length is zero, this is a no-op.
  if (Len == 0) return MI;  // memset(d,c,0,a) -> noop

  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
    Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
    Dest = Builder->CreateBitCast(Dest, NewDstPtrTy);

    // Alignment 0 is identity for alignment 1 for memset, but not store.
    if (Alignment == 0) Alignment = 1;

    // Extract the fill value and store.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    StoreInst *S = Builder->CreateStore(ConstantInt::get(ITy, Fill), Dest,
                                        MI->isVolatile());
    S->setAlignment(Alignment);

    // Set the size of the memset to 0, it will be deleted on the next
    // iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return 0;
}

/// visitCallInst - CallInst simplification.  This mostly only handles folding
/// of intrinsic instructions.  For normal calls, it allows visitCallSite to do
/// the heavy lifting.
///
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
  if (isFreeCall(&CI))
    return visitFree(CI);
  if (extractMallocCall(&CI) || extractCallocCall(&CI))
    return visitMalloc(CI);

  // If the caller function is nounwind, mark the call as nounwind, even if the
  // callee isn't.
  if (CI.getParent()->getParent()->doesNotThrow() &&
      !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallSite(&CI);

  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue())
        return EraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations.  We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // No other transformations apply to volatile transfers.
    if (MI->isVolatile())
      return 0;

    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getParent()->getParent()->getParent();
          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
          Type *Tys[3] = { CI.getArgOperand(0)->getType(),
                           CI.getArgOperand(1)->getType(),
                           CI.getArgOperand(2)->getType() };
          CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
          Changed = true;
        }
    }

    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return EraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (isa<MemTransferInst>(MI)) {
      if (Instruction *I = SimplifyMemTransfer(MI))
        return I;
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
      if (Instruction *I = SimplifyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }

  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::objectsize: {
    // We need target data for just about everything so depend on it.
    if (!TD) return 0;

    Type *ReturnTy = CI.getType();
    uint64_t DontKnow = II->getArgOperand(1) == Builder->getTrue() ? 0 : -1ULL;

    // Get to the real allocated thing and offset as fast as possible.
    Value *Op1 = II->getArgOperand(0)->stripPointerCasts();

    uint64_t Offset = 0;
    uint64_t Size = -1ULL;

    // Try to look through constant GEPs.
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1)) {
      if (!GEP->hasAllConstantIndices()) return 0;

      // Get the current byte offset into the thing.  Use the original
      // operand in case we're looking through a bitcast.
      SmallVector<Value*, 8> Ops(GEP->idx_begin(), GEP->idx_end());
      if (!GEP->getPointerOperandType()->isPointerTy())
        return 0;
      Offset = TD->getIndexedOffset(GEP->getPointerOperandType(), Ops);

      Op1 = GEP->getPointerOperand()->stripPointerCasts();

      // Make sure we're not a constant offset from an external
      // global.
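      // For example, with "extern char buf[];" the global's size is unknown
      // in this module, so even a constant offset into it cannot be folded
      // to a fixed object size.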
      if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1))
        if (!GV->hasDefinitiveInitializer()) return 0;
    }

    // If we've stripped down to a single global variable that we
    // can know the size of then just return that.
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1)) {
      if (GV->hasDefinitiveInitializer()) {
        Constant *C = GV->getInitializer();
        Size = TD->getTypeAllocSize(C->getType());
      } else {
        // Can't determine size of the GV.
        Constant *RetVal = ConstantInt::get(ReturnTy, DontKnow);
        return ReplaceInstUsesWith(CI, RetVal);
      }
    } else if (AllocaInst *AI = dyn_cast<AllocaInst>(Op1)) {
      // Get alloca size.
      if (AI->getAllocatedType()->isSized()) {
        Size = TD->getTypeAllocSize(AI->getAllocatedType());
        if (AI->isArrayAllocation()) {
          const ConstantInt *C = dyn_cast<ConstantInt>(AI->getArraySize());
          if (!C) return 0;
          Size *= C->getZExtValue();
        }
      }
    } else if (CallInst *MI = extractMallocCall(Op1)) {
      // Get allocation size.
      Value *Arg = MI->getArgOperand(0);
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg))
        Size = CI->getZExtValue();

    } else if (CallInst *MI = extractCallocCall(Op1)) {
      // Get allocation size.
      Value *Arg1 = MI->getArgOperand(0);
      Value *Arg2 = MI->getArgOperand(1);
      if (ConstantInt *CI1 = dyn_cast<ConstantInt>(Arg1))
        if (ConstantInt *CI2 = dyn_cast<ConstantInt>(Arg2)) {
          bool overflow;
          APInt SizeAP = CI1->getValue().umul_ov(CI2->getValue(), overflow);
          if (!overflow)
            Size = SizeAP.getZExtValue();
          else
            return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, DontKnow));
        }
    }

    // Do not return "I don't know" here.  Later optimization passes could
    // make it possible to evaluate objectsize to a constant.
    if (Size == -1ULL)
      return 0;

    if (Size < Offset) {
      // Out of bound reference?  Negative index normalized to large
      // index?  Just return "I don't know".
      return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, DontKnow));
    }
    return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, Size-Offset));
  }
  case Intrinsic::bswap:
    // bswap(bswap(x)) -> x
    if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getArgOperand(0)))
      if (Operand->getIntrinsicID() == Intrinsic::bswap)
        return ReplaceInstUsesWith(CI, Operand->getArgOperand(0));

    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
    if (TruncInst *TI = dyn_cast<TruncInst>(II->getArgOperand(0))) {
      if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(TI->getOperand(0)))
        if (Operand->getIntrinsicID() == Intrinsic::bswap) {
          unsigned C = Operand->getType()->getPrimitiveSizeInBits() -
                       TI->getType()->getPrimitiveSizeInBits();
          Value *CV = ConstantInt::get(Operand->getType(), C);
          Value *V = Builder->CreateLShr(Operand->getArgOperand(0), CV);
          return new TruncInst(V, TI->getType());
        }
    }

    break;
  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
      // powi(x, 1) -> x
      if (Power->isOne())
        return ReplaceInstUsesWith(CI, II->getArgOperand(0));
      // powi(x, -1) -> 1/x
      if (Power->isAllOnesValue())
        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
                                          II->getArgOperand(0));
    }
    break;
  case Intrinsic::cttz: {
    // If all bits below the first known one are known zero,
    // this value is constant.
    IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
    // FIXME: Try to simplify vectors of integers.
    if (!IT) break;
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getArgOperand(0), KnownZero, KnownOne);
    unsigned TrailingZeros = KnownOne.countTrailingZeros();
    APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, TrailingZeros)));

  }
  break;
  case Intrinsic::ctlz: {
    // If all bits above the first known one are known zero,
    // this value is constant.
    IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
    // FIXME: Try to simplify vectors of integers.
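    // For example, an i32 known to have bit 15 set and bits 16..31 clear has
    // exactly 16 leading zeros, so the ctlz call folds to the constant 16.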
    if (!IT) break;
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getArgOperand(0), KnownZero, KnownOne);
    unsigned LeadingZeros = KnownOne.countLeadingZeros();
    APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, LeadingZeros)));

  }
  break;
  case Intrinsic::uadd_with_overflow: {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt LHSKnownZero(BitWidth, 0);
    APInt LHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(LHS, LHSKnownZero, LHSKnownOne);
    bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
    bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];

    if (LHSKnownNegative || LHSKnownPositive) {
      APInt RHSKnownZero(BitWidth, 0);
      APInt RHSKnownOne(BitWidth, 0);
      ComputeMaskedBits(RHS, RHSKnownZero, RHSKnownOne);
      bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
      bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
      if (LHSKnownNegative && RHSKnownNegative) {
        // The sign bit is set in both cases: this MUST overflow.
        // Create a simple add instruction, and insert it into the struct.
        Value *Add = Builder->CreateAdd(LHS, RHS);
        Add->takeName(&CI);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getTrue(II->getContext())
        };
        StructType *ST = cast<StructType>(II->getType());
        Constant *Struct = ConstantStruct::get(ST, V);
        return InsertValueInst::Create(Struct, Add, 0);
      }

      if (LHSKnownPositive && RHSKnownPositive) {
        // The sign bit is clear in both cases: this CANNOT overflow.
        // Create a simple add instruction, and insert it into the struct.
        Value *Add = Builder->CreateNUWAdd(LHS, RHS);
        Add->takeName(&CI);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        StructType *ST = cast<StructType>(II->getType());
        Constant *Struct = ConstantStruct::get(ST, V);
        return InsertValueInst::Create(Struct, Add, 0);
      }
    }
  }
  // FALL THROUGH uadd into sadd
  case Intrinsic::sadd_with_overflow:
    // Canonicalize constants into the RHS.
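    // For example, sadd.with.overflow(42, %x) becomes
    // sadd.with.overflow(%x, 42), so later folds only need to match one form.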
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X + undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X + 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct =
          ConstantStruct::get(cast<StructType>(II->getType()), V);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // undef - X -> undef
    // X - undef -> undef
    if (isa<UndefValue>(II->getArgOperand(0)) ||
        isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X - 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct =
          ConstantStruct::get(cast<StructType>(II->getType()), V);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::umul_with_overflow: {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    unsigned BitWidth = cast<IntegerType>(LHS->getType())->getBitWidth();

    APInt LHSKnownZero(BitWidth, 0);
    APInt LHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(LHS, LHSKnownZero, LHSKnownOne);
    APInt RHSKnownZero(BitWidth, 0);
    APInt RHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(RHS, RHSKnownZero, RHSKnownOne);

    // Get the largest possible values for each operand.
    APInt LHSMax = ~LHSKnownZero;
    APInt RHSMax = ~RHSKnownZero;

    // If multiplying the maximum values does not overflow then we can turn
    // this into a plain NUW mul.
    bool Overflow;
    LHSMax.umul_ov(RHSMax, Overflow);
    if (!Overflow) {
      Value *Mul = Builder->CreateNUWMul(LHS, RHS, "umul_with_overflow");
      Constant *V[] = {
        UndefValue::get(LHS->getType()),
        Builder->getFalse()
      };
      Constant *Struct = ConstantStruct::get(cast<StructType>(II->getType()),V);
      return InsertValueInst::Create(Struct, Mul, 0);
    }
  } // FALL THROUGH
  case Intrinsic::smul_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X * undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X*0 -> {0, false}
      if (RHSI->isZero())
        return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));

      // X * 1 -> {X, false}
      if (RHSI->equalsInt(1)) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct =
          ConstantStruct::get(cast<StructType>(II->getType()), V);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
    // Turn PPC lvx -> load if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                          PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, TD) >= 16) {
      Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(0)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
      return new StoreInst(II->getArgOperand(0), Ptr);
    }
    break;
  case Intrinsic::x86_sse_storeu_ps:
  case Intrinsic::x86_sse2_storeu_pd:
  case Intrinsic::x86_sse2_storeu_dq:
    // Turn X86 storeu -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
      Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(1)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
      return new StoreInst(II->getArgOperand(1), Ptr);
    }
    break;

  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64: {
    // These intrinsics only demand the 0th element of their input vectors. If
    // we can simplify the input based on that, do so now.
    unsigned VWidth =
      cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
    APInt DemandedElts(VWidth, 1);
    APInt UndefElts(VWidth, 0);
    if (Value *V = SimplifyDemandedVectorElts(II->getArgOperand(0),
                                              DemandedElts, UndefElts)) {
      II->setArgOperand(0, V);
      return II;
    }
    break;
  }


  case Intrinsic::x86_sse41_pmovsxbw:
  case Intrinsic::x86_sse41_pmovsxwd:
  case Intrinsic::x86_sse41_pmovsxdq:
  case Intrinsic::x86_sse41_pmovzxbw:
  case Intrinsic::x86_sse41_pmovzxwd:
  case Intrinsic::x86_sse41_pmovzxdq: {
    // pmov{s|z}x instructions ignore the upper half of their input vectors.
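    // For example, pmovsxbw uses only the low 8 of the 16 input bytes,
    // sign-extending each to an i16, so the upper 8 bytes are never read.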
    unsigned VWidth =
      cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
    unsigned LowHalfElts = VWidth / 2;
    APInt InputDemandedElts(APInt::getBitsSet(VWidth, 0, LowHalfElts));
    APInt UndefElts(VWidth, 0);
    if (Value *TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0),
                                                 InputDemandedElts,
                                                 UndefElts)) {
      II->setArgOperand(0, TmpV);
      return II;
    }
    break;
  }

  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    if (Constant *Mask = dyn_cast<Constant>(II->getArgOperand(2))) {
      assert(Mask->getType()->getVectorNumElements() == 16 &&
             "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        Constant *Elt = Mask->getAggregateElement(i);
        if (Elt == 0 ||
            !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0),
                                            Mask->getType());
        Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1),
                                            Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getAggregateElement(i)))
            continue;
          unsigned Idx =
            cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
          Idx &= 31;  // Match the hardware behavior.

          if (ExtractedElts[Idx] == 0) {
            ExtractedElts[Idx] =
              Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
                                            Builder->getInt32(Idx&15));
          }

          // Insert this value into the result vector.
          Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
                                                Builder->getInt32(i));
        }
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
      }
    }
    break;

  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane:
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), TD);
    unsigned AlignArg = II->getNumArgOperands() - 1;
    ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
    if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
      II->setArgOperand(AlignArg,
                        ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                         MemAlign, false));
      return II;
    }
    break;
  }

  case Intrinsic::arm_neon_vmulls:
  case Intrinsic::arm_neon_vmullu: {
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);

    // Handle mul by zero first:
    if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
      return ReplaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType()));
    }

    // Check for constant LHS & RHS - in this case we just simplify.
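    // For example, a vmullu of constant <4 x i16> vectors <1,2,3,4> and
    // <5,6,7,8> folds to the <4 x i32> constant <5,12,21,32>.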
    bool Zext = (II->getIntrinsicID() == Intrinsic::arm_neon_vmullu);
    VectorType *NewVT = cast<VectorType>(II->getType());
    unsigned NewWidth = NewVT->getElementType()->getIntegerBitWidth();
    if (ConstantDataVector *CV0 = dyn_cast<ConstantDataVector>(Arg0)) {
      if (ConstantDataVector *CV1 = dyn_cast<ConstantDataVector>(Arg1)) {
        VectorType* VT = cast<VectorType>(CV0->getType());
        SmallVector<Constant*, 4> NewElems;
        for (unsigned i = 0; i < VT->getNumElements(); ++i) {
          APInt CV0E =
            (cast<ConstantInt>(CV0->getAggregateElement(i)))->getValue();
          CV0E = Zext ? CV0E.zext(NewWidth) : CV0E.sext(NewWidth);
          APInt CV1E =
            (cast<ConstantInt>(CV1->getAggregateElement(i)))->getValue();
          CV1E = Zext ? CV1E.zext(NewWidth) : CV1E.sext(NewWidth);
          NewElems.push_back(
            ConstantInt::get(NewVT->getElementType(), CV0E * CV1E));
        }
        return ReplaceInstUsesWith(CI, ConstantVector::get(NewElems));
      }

      // Couldn't simplify - canonicalize constant to the RHS.
      std::swap(Arg0, Arg1);
    }

    // Handle mul by one:
    if (ConstantDataVector *CV1 = dyn_cast<ConstantDataVector>(Arg1)) {
      if (ConstantInt *Splat =
            dyn_cast_or_null<ConstantInt>(CV1->getSplatValue())) {
        if (Splat->isOne()) {
          if (Zext)
            return CastInst::CreateZExtOrBitCast(Arg0, II->getType());
          // else
          return CastInst::CreateSExtOrBitCast(Arg0, II->getType());
        }
      }
    }

    break;
  }

  case Intrinsic::stackrestore: {
    // If the save is right next to the restore, remove the restore.  This can
    // happen when variable allocas are DCE'd.
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        BasicBlock::iterator BI = SS;
        if (&*++BI == II)
          return EraseInstFromFunction(CI);
      }
    }

    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
    BasicBlock::iterator BI = II;
    TerminatorInst *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      if (isa<AllocaInst>(BI) || isMalloc(BI)) {
        CannotRemove = true;
        break;
      }
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
          // If there is a stackrestore below this one, remove this one.
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return EraseInstFromFunction(CI);
          // Otherwise, ignore the intrinsic.
        } else {
          // If we found a non-intrinsic call, we can't remove the stack
          // restore.
          CannotRemove = true;
          break;
        }
      }
    }

    // If the stack restore is in a return, resume, or unwind block and if there
    // are no allocas or calls between the restore and the return, nuke the
    // restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
      return EraseInstFromFunction(CI);
    break;
  }
  }

  return visitCallSite(II);
}

// InvokeInst simplification
//
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}

/// isSafeToEliminateVarargsCast - If this cast does not affect the value
/// passed through the varargs area, we can eliminate the use of the cast.
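/// For example, a lossless bitcast of an i32* argument to i8* that is only
/// passed through a varargs slot can be dropped, and the original pointer
/// passed directly instead.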
static bool isSafeToEliminateVarargsCast(const CallSite CS,
                                         const CastInst * const CI,
                                         const TargetData * const TD,
                                         const int ix) {
  if (!CI->isLosslessCast())
    return false;

  // The size of ByVal arguments is derived from the type, so we
  // can't change to a type with a different size.  If the size were
  // passed explicitly we could avoid this check.
  if (!CS.isByValArgument(ix))
    return true;

  Type* SrcTy =
    cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
    return false;
  return true;
}

namespace {
class InstCombineFortifiedLibCalls : public SimplifyFortifiedLibCalls {
  InstCombiner *IC;
protected:
  void replaceCall(Value *With) {
    NewInstruction = IC->ReplaceInstUsesWith(*CI, With);
  }
  bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp, bool isString) const {
    if (CI->getArgOperand(SizeCIOp) == CI->getArgOperand(SizeArgOp))
      return true;
    if (ConstantInt *SizeCI =
          dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp))) {
      if (SizeCI->isAllOnesValue())
        return true;
      if (isString) {
        uint64_t Len = GetStringLength(CI->getArgOperand(SizeArgOp));
        // If the length is 0 we don't know how long it is and so we can't
        // remove the check.
        if (Len == 0) return false;
        return SizeCI->getZExtValue() >= Len;
      }
      if (ConstantInt *Arg = dyn_cast<ConstantInt>(
            CI->getArgOperand(SizeArgOp)))
        return SizeCI->getZExtValue() >= Arg->getZExtValue();
    }
    return false;
  }
public:
  InstCombineFortifiedLibCalls(InstCombiner *IC) : IC(IC), NewInstruction(0) { }
  Instruction *NewInstruction;
};
} // end anonymous namespace

// Try to fold some different types of calls here.
// Currently we're only working with the checking functions, memcpy_chk,
// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
// strcat_chk and strncat_chk.
Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const TargetData *TD) {
  if (CI->getCalledFunction() == 0) return 0;

  InstCombineFortifiedLibCalls Simplifier(this);
  Simplifier.fold(CI, TD);
  return Simplifier.NewInstruction;
}

static IntrinsicInst *FindInitTrampolineFromAlloca(Value *TrampMem) {
  // Strip off at most one level of pointer casts, looking for an alloca.  This
  // is good enough in practice and simpler than handling any number of casts.
  Value *Underlying = TrampMem->stripPointerCasts();
  if (Underlying != TrampMem &&
      (!Underlying->hasOneUse() || *Underlying->use_begin() != TrampMem))
    return 0;
  if (!isa<AllocaInst>(Underlying))
    return 0;

  IntrinsicInst *InitTrampoline = 0;
  for (Value::use_iterator I = TrampMem->use_begin(), E = TrampMem->use_end();
       I != E; I++) {
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(*I);
    if (!II)
      return 0;
    if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
      if (InitTrampoline)
        // More than one init_trampoline writes to this value.  Give up.
        return 0;
      InitTrampoline = II;
      continue;
    }
    if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
      // Allow any number of calls to adjust.trampoline.
      continue;
    return 0;
  }

  // No call to init.trampoline found.
  if (!InitTrampoline)
    return 0;

  // Check that the alloca is being used in the expected way.
  if (InitTrampoline->getOperand(0) != TrampMem)
    return 0;

  return InitTrampoline;
}

static IntrinsicInst *FindInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
                                               Value *TrampMem) {
  // Visit all the previous instructions in the basic block, and try to find an
  // init.trampoline which has a direct path to the adjust.trampoline.
  for (BasicBlock::iterator I = AdjustTramp,
         E = AdjustTramp->getParent()->begin(); I != E; ) {
    Instruction *Inst = --I;
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
      if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
          II->getOperand(0) == TrampMem)
        return II;
    if (Inst->mayWriteToMemory())
      return 0;
  }
  return 0;
}

// Given a call to llvm.adjust.trampoline, find and return the corresponding
// call to llvm.init.trampoline if the call to the trampoline can be optimized
// to a direct call to a function.  Otherwise return NULL.
//
static IntrinsicInst *FindInitTrampoline(Value *Callee) {
  Callee = Callee->stripPointerCasts();
  IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
  if (!AdjustTramp ||
      AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
    return 0;

  Value *TrampMem = AdjustTramp->getOperand(0);

  if (IntrinsicInst *IT = FindInitTrampolineFromAlloca(TrampMem))
    return IT;
  if (IntrinsicInst *IT = FindInitTrampolineFromBB(AdjustTramp, TrampMem))
    return IT;
  return 0;
}

// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
  bool Changed = false;

  // If the callee is a pointer to a function, attempt to move any casts to the
  // arguments of the call/invoke.
  Value *Callee = CS.getCalledValue();
  if (!isa<Function>(Callee) && transformConstExprCastCall(CS))
    return 0;

  if (Function *CalleeF = dyn_cast<Function>(Callee))
    // If the call and callee calling conventions don't match, this call must
    // be unreachable, as the call is undefined.
    if (CalleeF->getCallingConv() != CS.getCallingConv() &&
        // Only do this for calls to a function with a body.  A prototype may
        // not actually end up matching the implementation's calling conv for a
        // variety of reasons (e.g. it may be written in assembly).
        !CalleeF->isDeclaration()) {
      Instruction *OldCall = CS.getInstruction();
      new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                    UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                    OldCall);
      // If OldCall does not return void then replaceAllUsesWith undef.
      // This allows ValueHandlers and custom metadata to adjust themselves.
      if (!OldCall->getType()->isVoidTy())
        ReplaceInstUsesWith(*OldCall, UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))
        return EraseInstFromFunction(*OldCall);

      // We cannot remove an invoke because that would change the CFG; just
      // change the callee to a null pointer instead.
      cast<InvokeInst>(OldCall)->setCalledFunction(
        Constant::getNullValue(CalleeF->getType()));
      return 0;
    }

  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
    // This instruction is not reachable, just remove it.  We insert a store to
    // undef so that we know that this code is not reachable, despite the fact
    // that we can't modify the CFG here.
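    // The "store i1 true, i1* undef" emitted below is instcombine's
    // conventional marker for code it has proven unreachable; the store is
    // undefined behavior, which later passes are free to exploit.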
    new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                  UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                  CS.getInstruction());

    // If CS does not return void then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust themselves.
    if (!CS.getInstruction()->getType()->isVoidTy())
      ReplaceInstUsesWith(*CS.getInstruction(),
                          UndefValue::get(CS.getInstruction()->getType()));

    if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      // Don't break the CFG, insert a dummy cond branch.
      BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
                         ConstantInt::getTrue(Callee->getContext()), II);
    }
    return EraseInstFromFunction(*CS.getInstruction());
  }

  if (IntrinsicInst *II = FindInitTrampoline(Callee))
    return transformCallThroughTrampoline(CS, II);

  PointerType *PTy = cast<PointerType>(Callee->getType());
  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams();
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
    for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
           E = CS.arg_end(); I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }

  // Try to optimize the call if possible; we require TargetData for most of
  // this.  None of these calls are seen as possibly dead, so go ahead and
  // delete the instruction now.
  if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
    Instruction *I = tryOptimizeCall(CI, TD);
    // If we changed something, return the result.  Otherwise fall through to
    // the check below.
    if (I) return EraseInstFromFunction(*I);
  }

  return Changed ? CS.getInstruction() : 0;
}

// transformConstExprCastCall - If the callee is a constexpr cast of a function,
// attempt to move the cast to the arguments of the call/invoke.
//
bool InstCombiner::transformConstExprCastCall(CallSite CS) {
  Function *Callee =
    dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
  if (Callee == 0)
    return false;
  Instruction *Caller = CS.getInstruction();
  const AttrListPtr &CallerPAL = CS.getAttributes();

  // Okay, this is a cast from a function to a different type.  Unless doing so
  // would cause a type conversion of one of our arguments, change this call to
  // be a direct call with arguments casted to the appropriate types.
  //
  FunctionType *FT = Callee->getFunctionType();
  Type *OldRetTy = Caller->getType();
  Type *NewRetTy = FT->getReturnType();

  if (NewRetTy->isStructTy())
    return false;  // TODO: Handle multiple return values.

  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {
    if (Callee->isDeclaration() &&
        // Conversion is ok if changing from one pointer type to another or from
        // a pointer to an integer of the same size.
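        // For example, i8* -> i32* is fine, as is i8* -> i64 on a target
        // with 64-bit pointers; i8* -> i16 would not be.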
        !((OldRetTy->isPointerTy() || !TD ||
           OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
          (NewRetTy->isPointerTy() || !TD ||
           NewRetTy == TD->getIntPtrType(Caller->getContext()))))
      return false;   // Cannot transform this return value.

    if (!Caller->use_empty() &&
        // void -> non-void is handled specially
        !NewRetTy->isVoidTy() && !CastInst::isCastable(NewRetTy, OldRetTy))
      return false;   // Cannot transform this return value.

    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
      Attributes RAttrs = CallerPAL.getRetAttributes();
      if (RAttrs & Attribute::typeIncompatible(NewRetTy))
        return false;   // Attribute not compatible with transformed value.
    }

    // If the callsite is an invoke instruction, and the return value is used by
    // a PHI node in a successor, we cannot change the return type of the call
    // because there is no place to put the cast instruction (without breaking
    // the critical edge).  Bail out in this case.
    if (!Caller->use_empty())
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
        for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
             UI != E; ++UI)
          if (PHINode *PN = dyn_cast<PHINode>(*UI))
            if (PN->getParent() == II->getNormalDest() ||
                PN->getParent() == II->getUnwindDest())
              return false;
  }

  unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  CallSite::arg_iterator AI = CS.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);
    Type *ActTy = (*AI)->getType();

    if (!CastInst::isCastable(ActTy, ParamTy))
      return false;   // Cannot transform this parameter value.

    Attributes Attrs = CallerPAL.getParamAttributes(i + 1);
    if (Attrs & Attribute::typeIncompatible(ParamTy))
      return false;   // Attribute not compatible with transformed value.

    // If the parameter is passed as a byval argument, then we have to have a
    // sized type and the sized type has to have the same size as the old type.
    if (ParamTy != ActTy && (Attrs & Attribute::ByVal)) {
      PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
      if (ParamPTy == 0 || !ParamPTy->getElementType()->isSized() || TD == 0)
        return false;

      Type *CurElTy = cast<PointerType>(ActTy)->getElementType();
      if (TD->getTypeAllocSize(CurElTy) !=
          TD->getTypeAllocSize(ParamPTy->getElementType()))
        return false;
    }

    // Converting from one pointer type to another or between a pointer and an
    // integer of the same size is safe even if we do not have a body.
    bool isConvertible = ActTy == ParamTy ||
      (TD && ((ParamTy->isPointerTy() ||
               ParamTy == TD->getIntPtrType(Caller->getContext())) &&
              (ActTy->isPointerTy() ||
               ActTy == TD->getIntPtrType(Caller->getContext()))));
    if (Callee->isDeclaration() && !isConvertible) return false;
  }

  if (Callee->isDeclaration()) {
    // Do not delete arguments unless we have a function body.
    if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
      return false;

    // If the callee is just a declaration, don't change the varargsness of the
    // call.  We don't want to introduce a varargs call where one doesn't
    // already exist.
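    // For example, don't turn a call made through a "void (...)*" cast into
    // a direct call of a declared "void (i32)" function, or vice versa.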
    PointerType *APTy = cast<PointerType>(CS.getCalledValue()->getType());
    if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
      return false;

    // If both the callee and the cast type are varargs, we still have to make
    // sure the number of fixed parameters is the same or we have the same
    // ABI issues as if we introduced a varargs call.
    if (FT->isVarArg() &&
        cast<FunctionType>(APTy->getElementType())->isVarArg() &&
        FT->getNumParams() !=
        cast<FunctionType>(APTy->getElementType())->getNumParams())
      return false;
  }

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty())
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them.  Check that these extra arguments have attributes
    // that are compatible with being a vararg call argument.
    for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
      if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
        break;
      Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
      if (PAttrs & Attribute::VarArgsIncompatible)
        return false;
    }


  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary.
  std::vector<Value*> Args;
  Args.reserve(NumActualArgs);
  SmallVector<AttributeWithIndex, 8> attrVec;
  attrVec.reserve(NumCommonArgs);

  // Get any return attributes.
  Attributes RAttrs = CallerPAL.getRetAttributes();

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes.  Wipe out any problematic attributes.
  RAttrs &= ~Attribute::typeIncompatible(NewRetTy);

  // Add the new return attributes.
  if (RAttrs)
    attrVec.push_back(AttributeWithIndex::get(0, RAttrs));

  AI = CS.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);
    if ((*AI)->getType() == ParamTy) {
      Args.push_back(*AI);
    } else {
      Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
          false, ParamTy, false);
      Args.push_back(Builder->CreateCast(opcode, *AI, ParamTy));
    }

    // Add any parameter attributes.
    if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
      attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
  }

  // If the function takes more arguments than the call was taking, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));

  // If we are removing arguments to the function, emit an obnoxious warning.
  if (FT->getNumParams() < NumActualArgs) {
    if (!FT->isVarArg()) {
      errs() << "WARNING: While resolving call to function '"
             << Callee->getName() << "' arguments were dropped!\n";
    } else {
      // Add all of the arguments in their promoted form to the arg list.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        Type *PTy = getPromotedType((*AI)->getType());
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through va_arg area!
          Instruction::CastOps opcode =
            CastInst::getCastOpcode(*AI, false, PTy, false);
          Args.push_back(Builder->CreateCast(opcode, *AI, PTy));
        } else {
          Args.push_back(*AI);
        }

        // Add any parameter attributes.
        if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
          attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
      }
    }
  }

  if (Attributes FnAttrs = CallerPAL.getFnAttributes())
    attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));

  if (NewRetTy->isVoidTy())
    Caller->setName("");   // Void type should not have a name.

  const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),
                                                     attrVec.end());

  Instruction *NC;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NC = Builder->CreateInvoke(Callee, II->getNormalDest(),
                               II->getUnwindDest(), Args);
    NC->takeName(II);
    cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
    cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
  } else {
    CallInst *CI = cast<CallInst>(Caller);
    NC = Builder->CreateCall(Callee, Args);
    NC->takeName(CI);
    if (CI->isTailCall())
      cast<CallInst>(NC)->setTailCall();
    cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
    cast<CallInst>(NC)->setAttributes(NewCallerPAL);
  }

  // Insert a cast of the return type as necessary.
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      Instruction::CastOps opcode =
        CastInst::getCastOpcode(NC, false, OldRetTy, false);
      NV = NC = CastInst::Create(opcode, NC, OldRetTy);
      NC->setDebugLoc(Caller->getDebugLoc());

      // If this is an invoke instruction, we should insert it after the first
      // non-phi instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call; just insert the cast right after the call.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.AddUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }

  if (!Caller->use_empty())
    ReplaceInstUsesWith(*Caller, NV);

  EraseInstFromFunction(*Caller);
  return true;
}

// transformCallThroughTrampoline - Turn a call to a function created by an
// init_trampoline / adjust_trampoline intrinsic pair into a direct call to the
// underlying function.
//
Instruction *
InstCombiner::transformCallThroughTrampoline(CallSite CS,
                                             IntrinsicInst *Tramp) {
  Value *Callee = CS.getCalledValue();
  PointerType *PTy = cast<PointerType>(Callee->getType());
  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  const AttrListPtr &Attrs = CS.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return 0;

  assert(Tramp &&
         "transformCallThroughTrampoline called with incorrect CallSite.");

  Function *NestF = cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
  PointerType *NestFPTy = cast<PointerType>(NestF->getType());
  FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());

  const AttrListPtr &NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestIdx = 1;
    Type *NestTy = 0;
    Attributes NestAttr = Attribute::None;

    // Look for a parameter marked with the 'nest' attribute.
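    // For example, given "void f(i8* nest %chain, i32 %x)", the scan below
    // leaves NestIdx == 1 and NestTy == i8* (attribute indices are 1-based).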
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
           E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
      if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = NestAttrs.getParamAttributes(NestIdx);
        break;
      }

    if (NestTy) {
      Instruction *Caller = CS.getInstruction();
      std::vector<Value*> NewArgs;
      NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);

      SmallVector<AttributeWithIndex, 8> NewAttrs;
      NewAttrs.reserve(Attrs.getNumSlots() + 1);

      // Insert the nest argument into the call argument list, which may
      // mean appending it.  Likewise for attributes.

      // Add any result attributes.
      if (Attributes Attr = Attrs.getRetAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(0, Attr));

      {
        unsigned Idx = 1;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        do {
          if (Idx == NestIdx) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp->getArgOperand(2);
            if (NestVal->getType() != NestTy)
              NestVal = Builder->CreateBitCast(NestVal, NestTy, "nest");
            NewArgs.push_back(NestVal);
            NewAttrs.push_back(AttributeWithIndex::get(NestIdx, NestAttr));
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          if (Attributes Attr = Attrs.getParamAttributes(Idx))
            NewAttrs.push_back
              (AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr));

          ++Idx, ++I;
        } while (1);
      }

      // Add any function attributes.
      if (Attributes Attr = Attrs.getFnAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(~0, Attr));

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned Idx = 1;
        FunctionType::param_iterator I = FTy->param_begin(),
          E = FTy->param_end();

        do {
          if (Idx == NestIdx)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++Idx, ++I;
        } while (1);
      }

      // Replace the trampoline call with a direct call.  Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
        NestF->getType() == PointerType::getUnqual(NewFTy) ?
        NestF : ConstantExpr::getBitCast(NestF,
                                         PointerType::getUnqual(NewFTy));
      const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),
                                                   NewAttrs.end());

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        NewCaller = InvokeInst::Create(NewCallee,
                                       II->getNormalDest(), II->getUnwindDest(),
                                       NewArgs);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewCallee, NewArgs);
        if (cast<CallInst>(Caller)->isTailCall())
          cast<CallInst>(NewCaller)->setTailCall();
        cast<CallInst>(NewCaller)->
          setCallingConv(cast<CallInst>(Caller)->getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }

      return NewCaller;
    }
  }

  // Replace the trampoline call with a direct call.  Since there is no 'nest'
  // parameter, there is no need to adjust the argument list.  Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee =
    NestF->getType() == PTy ? NestF :
                              ConstantExpr::getBitCast(NestF, PTy);
  CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}