InstCombineCalls.cpp revision 165dac08d1bb8428b32a5f39cdd3dbee2888987f
//===- InstCombineCalls.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall and visitInvoke functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
using namespace llvm;

/// getPromotedType - Return the specified type promoted as it would be to pass
/// through a va_arg area.
static const Type *getPromotedType(const Type *Ty) {
  if (const IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}

/// EnforceKnownAlignment - If the specified pointer points to an object that
/// we control, modify the object's alignment to PrefAlign. This isn't
/// often possible though. If alignment is important, a more reliable approach
/// is to simply align all global variables and allocation instructions to
/// their preferred alignment from the beginning.
///
static unsigned EnforceKnownAlignment(Value *V,
                                      unsigned Align, unsigned PrefAlign) {

  User *U = dyn_cast<User>(V);
  if (!U) return Align;

  switch (Operator::getOpcode(U)) {
  default: break;
  case Instruction::BitCast:
    return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
  case Instruction::GetElementPtr: {
    // If all indexes are zero, it is just the alignment of the base pointer.
    bool AllZeroOperands = true;
    for (User::op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e; ++i)
      if (!isa<Constant>(*i) ||
          !cast<Constant>(*i)->isNullValue()) {
        AllZeroOperands = false;
        break;
      }

    if (AllZeroOperands) {
      // Treat this like a bitcast.
      return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
    }
    break;
  }
  }

  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // If there is a large requested alignment and we can, bump up the
    // alignment of the global.
    if (!GV->isDeclaration()) {
      if (GV->getAlignment() >= PrefAlign)
        Align = GV->getAlignment();
      else {
        GV->setAlignment(PrefAlign);
        Align = PrefAlign;
      }
    }
  } else if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    // If there is a requested alignment and if this is an alloca, round up.
    if (AI->getAlignment() >= PrefAlign)
      Align = AI->getAlignment();
    else {
      AI->setAlignment(PrefAlign);
      Align = PrefAlign;
    }
  }

  return Align;
}

/// GetOrEnforceKnownAlignment - If the specified pointer has an alignment that
/// we can determine, return it, otherwise return 0. If PrefAlign is specified,
/// and it is more than the alignment of the ultimate object, see if we can
/// increase the alignment of the ultimate object, making this check succeed.
unsigned InstCombiner::GetOrEnforceKnownAlignment(Value *V,
                                                  unsigned PrefAlign) {
  unsigned BitWidth = TD ? TD->getTypeSizeInBits(V->getType()) :
                           sizeof(PrefAlign) * CHAR_BIT;
  APInt Mask = APInt::getAllOnesValue(BitWidth);
  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  ComputeMaskedBits(V, Mask, KnownZero, KnownOne);
  unsigned TrailZ = KnownZero.countTrailingOnes();
  unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);

  if (PrefAlign > Align)
    Align = EnforceKnownAlignment(V, Align, PrefAlign);

  // We don't need to make any adjustment.
  return Align;
}

Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
  unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getOperand(0));
  unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getOperand(1));
  unsigned MinAlign = std::min(DstAlign, SrcAlign);
  unsigned CopyAlign = MI->getAlignment();

  if (CopyAlign < MinAlign) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      MinAlign, false));
    return MI;
  }

  // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
  // load/store.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getOperand(2));
  if (MemOpLength == 0) return 0;

  // Source and destination pointer types are always "i8*" for intrinsic. See
  // if the size is something we can handle with a single primitive load/store.
  // A single load+store correctly handles overlapping memory in the memmove
  // case.
  unsigned Size = MemOpLength->getZExtValue();
  if (Size == 0) return MI;  // Delete this mem transfer.

  if (Size > 8 || (Size&(Size-1)))
    return 0;  // If not 1/2/4/8 bytes, exit.

  // Use an integer load+store unless we can find something better.
  unsigned SrcAddrSp =
    cast<PointerType>(MI->getOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
    cast<PointerType>(MI->getOperand(0)->getType())->getAddressSpace();

  const IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);

  // Memcpy forces the use of i8* for the source and destination. That means
  // that if you're using memcpy to move one double around, you'll get a cast
  // from double* to i8*. We'd much rather use a double load+store than an i64
  // load+store here, because this improves the odds that the source or dest
  // address will be promotable. See if we can find a better type than the
  // integer datatype.
  Value *StrippedDest = MI->getOperand(0)->stripPointerCasts();
  if (StrippedDest != MI->getOperand(0)) {
    const Type *SrcETy = cast<PointerType>(StrippedDest->getType())
                           ->getElementType();
    if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
      // The SrcETy might be something like {{{double}}} or [1 x double]. Rip
      // down through these levels if so.
      while (!SrcETy->isSingleValueType()) {
        if (const StructType *STy = dyn_cast<StructType>(SrcETy)) {
          if (STy->getNumElements() == 1)
            SrcETy = STy->getElementType(0);
          else
            break;
        } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
          if (ATy->getNumElements() == 1)
            SrcETy = ATy->getElementType();
          else
            break;
        } else
          break;
      }

      if (SrcETy->isSingleValueType()) {
        NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp);
        NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp);
      }
    }
  }

  // If the memcpy/memmove provides better alignment info than we can
  // infer, use it.
  SrcAlign = std::max(SrcAlign, CopyAlign);
  DstAlign = std::max(DstAlign, CopyAlign);

  Value *Src = Builder->CreateBitCast(MI->getOperand(1), NewSrcPtrTy);
  Value *Dest = Builder->CreateBitCast(MI->getOperand(0), NewDstPtrTy);
  Instruction *L = new LoadInst(Src, "tmp", MI->isVolatile(), SrcAlign);
  InsertNewInstBefore(L, *MI);
  InsertNewInstBefore(new StoreInst(L, Dest, MI->isVolatile(), DstAlign),
                      *MI);

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setOperand(2, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}

Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
  unsigned Alignment = GetOrEnforceKnownAlignment(MI->getDest());
  if (MI->getAlignment() < Alignment) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      Alignment, false));
    return MI;
  }

  // Extract the length and alignment and fill if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return 0;
  uint64_t Len = LenC->getZExtValue();
  Alignment = MI->getAlignment();

  // If the length is zero, this is a no-op.
  if (Len == 0) return MI;  // memset(d,c,0,a) -> noop

  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    const Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    Dest = Builder->CreateBitCast(Dest, PointerType::getUnqual(ITy));

    // Alignment 0 is identity for alignment 1 for memset, but not store.
    if (Alignment == 0) Alignment = 1;

    // Extract the fill value and store. Multiplying the i8 fill value by
    // 0x0101010101010101 splats the byte into all eight byte lanes;
    // truncation to ITy then keeps just the low Len bytes.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    InsertNewInstBefore(new StoreInst(ConstantInt::get(ITy, Fill),
                                      Dest, false, Alignment), *MI);

    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return 0;
}

/// visitCallInst - CallInst simplification. This mostly only handles folding
/// of intrinsic instructions. For normal calls, it allows visitCallSite to do
/// the heavy lifting.
///
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
  if (isFreeCall(&CI))
    return visitFree(CI);

  // If the caller function is nounwind, mark the call as nounwind, even if the
  // callee isn't.
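  // (Unwinding out of a nounwind function is undefined behavior, so
  // tightening the call site's attribute cannot change any defined behavior.)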
  if (CI.getParent()->getParent()->doesNotThrow() &&
      !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallSite(&CI);

  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue()) return EraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations. We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = MMI->getParent()->getParent()->getParent();
          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
          const Type *Tys[3] = { CI.getOperand(0)->getType(),
                                 CI.getOperand(1)->getType(),
                                 CI.getOperand(2)->getType() };
          MMI->setCalledFunction(
                          Intrinsic::getDeclaration(M, MemCpyID, Tys, 3));
          Changed = true;
        }
    }

    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return EraseInstFromFunction(CI);

      // If we can determine a pointer alignment that is bigger than currently
      // set, update the alignment.
      if (Instruction *I = SimplifyMemTransfer(MTI))
        return I;
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
      if (Instruction *I = SimplifyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }

  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::objectsize: {
    // We need target data for just about everything so depend on it.
    if (!TD) break;

    const Type *ReturnTy = CI.getType();
    bool Min = (cast<ConstantInt>(II->getOperand(1))->getZExtValue() == 1);

    // Get to the real allocated thing and offset as fast as possible.
    Value *Op1 = II->getOperand(0)->stripPointerCasts();

    // If we've stripped down to a single global variable that we
    // can know the size of then just return that.
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1)) {
      if (GV->hasDefinitiveInitializer()) {
        Constant *C = GV->getInitializer();
        uint64_t GlobalSize = TD->getTypeAllocSize(C->getType());
        return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, GlobalSize));
      } else {
        // Can't determine size of the GV.
        Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
        return ReplaceInstUsesWith(CI, RetVal);
      }
    } else if (AllocaInst *AI = dyn_cast<AllocaInst>(Op1)) {
      // Get alloca size.
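      // An array alloca's total size is the element size times the array
      // length; give up below if the length is not a compile-time constant.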
      if (AI->getAllocatedType()->isSized()) {
        uint64_t AllocaSize = TD->getTypeAllocSize(AI->getAllocatedType());
        if (AI->isArrayAllocation()) {
          const ConstantInt *C = dyn_cast<ConstantInt>(AI->getArraySize());
          if (!C) break;
          AllocaSize *= C->getZExtValue();
        }
        return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, AllocaSize));
      }
    } else if (CallInst *MI = extractMallocCall(Op1)) {
      const Type* MallocType = getMallocAllocatedType(MI);
      // Get malloc size.
      if (MallocType && MallocType->isSized()) {
        if (Value *NElems = getMallocArraySize(MI, TD, true)) {
          if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
            return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy,
                (NElements->getZExtValue() * TD->getTypeAllocSize(MallocType))));
        }
      }
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op1)) {
      // Only handle constant GEPs here.
      if (CE->getOpcode() != Instruction::GetElementPtr) break;
      GEPOperator *GEP = cast<GEPOperator>(CE);

      // Make sure we're not a constant offset from an external
      // global.
      Value *Operand = GEP->getPointerOperand();
      Operand = Operand->stripPointerCasts();
      if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Operand))
        if (!GV->hasDefinitiveInitializer()) break;

      // Get what we're pointing to and its size.
      const PointerType *BaseType =
        cast<PointerType>(Operand->getType());
      uint64_t Size = TD->getTypeAllocSize(BaseType->getElementType());

      // Get the current byte offset into the thing. Use the original
      // operand in case we're looking through a bitcast.
      SmallVector<Value*, 8> Ops(CE->op_begin()+1, CE->op_end());
      const PointerType *OffsetType =
        cast<PointerType>(GEP->getPointerOperand()->getType());
      uint64_t Offset = TD->getIndexedOffset(OffsetType, &Ops[0], Ops.size());

      if (Size < Offset) {
        // Out of bound reference? Negative index normalized to large
        // index? Just return "I don't know".
        Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
        return ReplaceInstUsesWith(CI, RetVal);
      }

      Constant *RetVal = ConstantInt::get(ReturnTy, Size-Offset);
      return ReplaceInstUsesWith(CI, RetVal);
    }

    // Do not return "I don't know" here. Later optimization passes could
    // make it possible to evaluate objectsize to a constant.
    break;
  }
  case Intrinsic::bswap:
    // bswap(bswap(x)) -> x
    if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getOperand(0)))
      if (Operand->getIntrinsicID() == Intrinsic::bswap)
        return ReplaceInstUsesWith(CI, Operand->getOperand(0));

    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
    if (TruncInst *TI = dyn_cast<TruncInst>(II->getOperand(0))) {
      if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(TI->getOperand(0)))
        if (Operand->getIntrinsicID() == Intrinsic::bswap) {
          unsigned C = Operand->getType()->getPrimitiveSizeInBits() -
                       TI->getType()->getPrimitiveSizeInBits();
          Value *CV = ConstantInt::get(Operand->getType(), C);
          Value *V = Builder->CreateLShr(Operand->getOperand(0), CV);
          return new TruncInst(V, TI->getType());
        }
    }

    break;
  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getOperand(1))) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
      // powi(x, 1) -> x
      if (Power->isOne())
        return ReplaceInstUsesWith(CI, II->getOperand(0));
      // powi(x, -1) -> 1/x
      if (Power->isAllOnesValue())
        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
                                          II->getOperand(0));
    }
    break;
  case Intrinsic::cttz: {
    // If all bits below the first known one are known zero,
    // this value is constant.
    const IntegerType *IT = cast<IntegerType>(II->getOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getOperand(0), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned TrailingZeros = KnownOne.countTrailingZeros();
    APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, TrailingZeros)));

  }
  break;
  case Intrinsic::ctlz: {
    // If all bits above the first known one are known zero,
    // this value is constant.
    const IntegerType *IT = cast<IntegerType>(II->getOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getOperand(0), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned LeadingZeros = KnownOne.countLeadingZeros();
    APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, LeadingZeros)));

  }
  break;
  case Intrinsic::uadd_with_overflow: {
    Value *LHS = II->getOperand(0), *RHS = II->getOperand(1);
    const IntegerType *IT = cast<IntegerType>(II->getOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt Mask = APInt::getSignBit(BitWidth);
    APInt LHSKnownZero(BitWidth, 0);
    APInt LHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
    bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
    bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];

    if (LHSKnownNegative || LHSKnownPositive) {
      APInt RHSKnownZero(BitWidth, 0);
      APInt RHSKnownOne(BitWidth, 0);
      ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
      bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
      bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
      if (LHSKnownNegative && RHSKnownNegative) {
        // The sign bit is set in both cases: this MUST overflow.
        // Create a simple add instruction, and insert it into the struct.
        Instruction *Add = BinaryOperator::CreateAdd(LHS, RHS, "", &CI);
        Worklist.Add(Add);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getTrue(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, Add, 0);
      }

      if (LHSKnownPositive && RHSKnownPositive) {
        // The sign bit is clear in both cases: this CANNOT overflow.
        // Create a simple add instruction, and insert it into the struct.
        Instruction *Add = BinaryOperator::CreateNUWAdd(LHS, RHS, "", &CI);
        Worklist.Add(Add);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, Add, 0);
      }
    }
  }
  // FALL THROUGH uadd into sadd
  case Intrinsic::sadd_with_overflow:
    // Canonicalize constants into the RHS.
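    // (Keeping any constant on the RHS means the folds below only have to
    // inspect operand 1.)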
    if (isa<Constant>(II->getOperand(0)) &&
        !isa<Constant>(II->getOperand(1))) {
      Value *LHS = II->getOperand(0);
      II->setOperand(0, II->getOperand(1));
      II->setOperand(1, LHS);
      return II;
    }

    // X + undef -> undef
    if (isa<UndefValue>(II->getOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getOperand(1))) {
      // X + 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getOperand(0), 0);
      }
    }
    break;
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // undef - X -> undef
    // X - undef -> undef
    if (isa<UndefValue>(II->getOperand(0)) ||
        isa<UndefValue>(II->getOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getOperand(1))) {
      // X - 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getOperand(0), 0);
      }
    }
    break;
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getOperand(0)) &&
        !isa<Constant>(II->getOperand(1))) {
      Value *LHS = II->getOperand(0);
      II->setOperand(0, II->getOperand(1));
      II->setOperand(1, LHS);
      return II;
    }

    // X * undef -> undef
    if (isa<UndefValue>(II->getOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getOperand(1))) {
      // X*0 -> {0, false}
      if (RHSI->isZero())
        return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));

      // X * 1 -> {X, false}
      if (RHSI->equalsInt(1)) {
        Constant *V[] = {
          UndefValue::get(II->getOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getOperand(0), 0);
      }
    }
    break;
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::x86_sse_loadu_ps:
  case Intrinsic::x86_sse2_loadu_pd:
  case Intrinsic::x86_sse2_loadu_dq:
    // Turn PPC lvx -> load if the pointer is known aligned.
    // Turn X86 loadups -> load if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getOperand(0), 16) >= 16) {
      Value *Ptr = Builder->CreateBitCast(II->getOperand(0),
                                        PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
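    // (stvx/stvxl assume a 16-byte-aligned address, so once that alignment is
    // proven the intrinsic is equivalent to a plain aligned vector store.)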
    if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getOperand(0)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getOperand(1), OpPtrTy);
      return new StoreInst(II->getOperand(0), Ptr);
    }
    break;
  case Intrinsic::x86_sse_storeu_ps:
  case Intrinsic::x86_sse2_storeu_pd:
  case Intrinsic::x86_sse2_storeu_dq:
    // Turn X86 storeu -> store if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getOperand(0), 16) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getOperand(1)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getOperand(0), OpPtrTy);
      return new StoreInst(II->getOperand(1), Ptr);
    }
    break;

  case Intrinsic::x86_sse_cvttss2si: {
    // These intrinsics only demand the 0th element of their input vector. If
    // we can simplify the input based on that, do so now.
    unsigned VWidth =
      cast<VectorType>(II->getOperand(0)->getType())->getNumElements();
    APInt DemandedElts(VWidth, 1);
    APInt UndefElts(VWidth, 0);
    if (Value *V = SimplifyDemandedVectorElts(II->getOperand(0), DemandedElts,
                                              UndefElts)) {
      II->setOperand(0, V);
      return II;
    }
    break;
  }

  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getOperand(2))) {
      assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        if (!isa<ConstantInt>(Mask->getOperand(i)) &&
            !isa<UndefValue>(Mask->getOperand(i))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 = Builder->CreateBitCast(II->getOperand(0), Mask->getType());
        Value *Op1 = Builder->CreateBitCast(II->getOperand(1), Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getOperand(i)))
            continue;
          unsigned Idx = cast<ConstantInt>(Mask->getOperand(i))->getZExtValue();
          Idx &= 31;  // Match the hardware behavior.

          if (ExtractedElts[Idx] == 0) {
            ExtractedElts[Idx] =
              Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
                  ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                   Idx&15, false), "tmp");
          }

          // Insert this value into the result vector.
          Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
                         ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                          i, false), "tmp");
        }
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
      }
    }
    break;

  case Intrinsic::stackrestore: {
    // If the save is right next to the restore, remove the restore. This can
    // happen when variable allocas are DCE'd.
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getOperand(0))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        BasicBlock::iterator BI = SS;
        if (&*++BI == II)
          return EraseInstFromFunction(CI);
      }
    }

    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
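    // Allocas, mallocs and non-intrinsic calls may observe or modify the
    // stack pointer, so any of them blocks removal of this restore.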
    BasicBlock::iterator BI = II;
    TerminatorInst *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      if (isa<AllocaInst>(BI) || isMalloc(BI)) {
        CannotRemove = true;
        break;
      }
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
          // If there is a stackrestore below this one, remove this one.
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return EraseInstFromFunction(CI);
          // Otherwise, ignore the intrinsic.
        } else {
          // If we found a non-intrinsic call, we can't remove the stack
          // restore.
          CannotRemove = true;
          break;
        }
      }
    }

    // If the stack restore is in a return/unwind block and if there are no
    // allocas or calls between the restore and the return, nuke the restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<UnwindInst>(TI)))
      return EraseInstFromFunction(CI);
    break;
  }
  }

  return visitCallSite(II);
}

// InvokeInst simplification
//
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}

/// isSafeToEliminateVarargsCast - If this cast does not affect the value
/// passed through the varargs area, we can eliminate the use of the cast.
static bool isSafeToEliminateVarargsCast(const CallSite CS,
                                         const CastInst * const CI,
                                         const TargetData * const TD,
                                         const int ix) {
  if (!CI->isLosslessCast())
    return false;

  // The size of ByVal arguments is derived from the type, so we
  // can't change to a type with a different size. If the size were
  // passed explicitly we could avoid this check.
  if (!CS.paramHasAttr(ix, Attribute::ByVal))
    return true;

  const Type* SrcTy =
    cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  const Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
    return false;
  return true;
}

namespace {
class InstCombineFortifiedLibCalls : public SimplifyFortifiedLibCalls {
  InstCombiner *IC;
protected:
  void replaceCall(Value *With) {
    NewInstruction = IC->ReplaceInstUsesWith(*CI, With);
  }
  bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp, bool isString) const {
    if (ConstantInt *SizeCI = dyn_cast<ConstantInt>(CI->getOperand(SizeCIOp))) {
      if (SizeCI->isAllOnesValue())
        return true;
      if (isString)
        return SizeCI->getZExtValue() >=
               GetStringLength(CI->getOperand(SizeArgOp));
      if (ConstantInt *Arg = dyn_cast<ConstantInt>(CI->getOperand(SizeArgOp)))
        return SizeCI->getZExtValue() >= Arg->getZExtValue();
    }
    return false;
  }
public:
  InstCombineFortifiedLibCalls(InstCombiner *IC) : IC(IC), NewInstruction(0) { }
  Instruction *NewInstruction;
};
} // end anonymous namespace

// Try to fold some different types of calls here.
// Currently we're only working with the checking functions, memcpy_chk,
// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
// strcat_chk and strncat_chk.
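// A _chk call is foldable when its object-size argument is all-ones (i.e.
// "unknown") or is provably at least as large as the amount written; see
// isFoldable above.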
Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const TargetData *TD) {
  if (CI->getCalledFunction() == 0) return 0;

  InstCombineFortifiedLibCalls Simplifier(this);
  Simplifier.fold(CI, TD);
  return Simplifier.NewInstruction;
}

// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
  bool Changed = false;

  // If the callee is a constexpr cast of a function, attempt to move the cast
  // to the arguments of the call/invoke.
  if (transformConstExprCastCall(CS)) return 0;

  Value *Callee = CS.getCalledValue();

  if (Function *CalleeF = dyn_cast<Function>(Callee))
    // If the call and callee calling conventions don't match, this call must
    // be unreachable, as the call is undefined.
    if (CalleeF->getCallingConv() != CS.getCallingConv() &&
        // Only do this for calls to a function with a body. A prototype may
        // not actually end up matching the implementation's calling conv for a
        // variety of reasons (e.g. it may be written in assembly).
        !CalleeF->isDeclaration()) {
      Instruction *OldCall = CS.getInstruction();
      new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                    OldCall);
      // If OldCall does not return void then replaceAllUsesWith undef.
      // This allows ValueHandlers and custom metadata to adjust themselves.
      if (!OldCall->getType()->isVoidTy())
        OldCall->replaceAllUsesWith(UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))
        return EraseInstFromFunction(*OldCall);

      // We cannot remove an invoke, because it would change the CFG; just
      // change the callee to a null pointer.
      cast<InvokeInst>(OldCall)->setCalledFunction(
                                   Constant::getNullValue(CalleeF->getType()));
      return 0;
    }

  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
    // This instruction is not reachable, just remove it. We insert a store to
    // undef so that we know that this code is not reachable, despite the fact
    // that we can't modify the CFG here.
    new StoreInst(ConstantInt::getTrue(Callee->getContext()),
               UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                  CS.getInstruction());

    // If CS does not return void then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust themselves.
    if (!CS.getInstruction()->getType()->isVoidTy())
      CS.getInstruction()->
        replaceAllUsesWith(UndefValue::get(CS.getInstruction()->getType()));

    if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      // Don't break the CFG, insert a dummy cond branch.
      BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
                         ConstantInt::getTrue(Callee->getContext()), II);
    }
    return EraseInstFromFunction(*CS.getInstruction());
  }

  if (BitCastInst *BC = dyn_cast<BitCastInst>(Callee))
    if (IntrinsicInst *In = dyn_cast<IntrinsicInst>(BC->getOperand(0)))
      if (In->getIntrinsicID() == Intrinsic::init_trampoline)
        return transformCallThroughTrampoline(CS);

  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams() + (isa<InvokeInst>(Callee) ? 3 : 1);
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
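    // Arguments in the variadic area are not type-checked against a
    // prototype, so a lossless cast of such an argument can simply be
    // stripped (see isSafeToEliminateVarargsCast above).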
    for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
           E = CS.arg_end(); I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }

  // Try to optimize the call if possible; we require TargetData for most of
  // this. None of these calls are seen as possibly dead so go ahead and
  // delete the instruction now.
  if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
    Instruction *I = tryOptimizeCall(CI, TD);
    // If we changed something, return the result; otherwise fall through to
    // the remaining checks.
    if (I) return EraseInstFromFunction(*I);
  }

  return Changed ? CS.getInstruction() : 0;
}

// transformConstExprCastCall - If the callee is a constexpr cast of a function,
// attempt to move the cast to the arguments of the call/invoke.
//
bool InstCombiner::transformConstExprCastCall(CallSite CS) {
  if (!isa<ConstantExpr>(CS.getCalledValue())) return false;
  ConstantExpr *CE = cast<ConstantExpr>(CS.getCalledValue());
  if (CE->getOpcode() != Instruction::BitCast ||
      !isa<Function>(CE->getOperand(0)))
    return false;
  Function *Callee = cast<Function>(CE->getOperand(0));
  Instruction *Caller = CS.getInstruction();
  const AttrListPtr &CallerPAL = CS.getAttributes();

  // Okay, this is a cast from a function to a different type. Unless doing so
  // would cause a type conversion of one of our arguments, change this call to
  // be a direct call with arguments cast to the appropriate types.
  //
  const FunctionType *FT = Callee->getFunctionType();
  const Type *OldRetTy = Caller->getType();
  const Type *NewRetTy = FT->getReturnType();

  if (NewRetTy->isStructTy())
    return false; // TODO: Handle multiple return values.

  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {
    if (Callee->isDeclaration() &&
        // Conversion is ok if changing from one pointer type to another or
        // from a pointer to an integer of the same size.
        !((OldRetTy->isPointerTy() || !TD ||
           OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
          (NewRetTy->isPointerTy() || !TD ||
           NewRetTy == TD->getIntPtrType(Caller->getContext()))))
      return false;   // Cannot transform this return value.

    if (!Caller->use_empty() &&
        // void -> non-void is handled specially
        !NewRetTy->isVoidTy() && !CastInst::isCastable(NewRetTy, OldRetTy))
      return false;   // Cannot transform this return value.

    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
      Attributes RAttrs = CallerPAL.getRetAttributes();
      if (RAttrs & Attribute::typeIncompatible(NewRetTy))
        return false;   // Attribute not compatible with transformed value.
    }

    // If the callsite is an invoke instruction, and the return value is used
    // by a PHI node in a successor, we cannot change the return type of the
    // call because there is no place to put the cast instruction (without
    // breaking the critical edge). Bail out in this case.
    if (!Caller->use_empty())
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
        for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
             UI != E; ++UI)
          if (PHINode *PN = dyn_cast<PHINode>(*UI))
            if (PN->getParent() == II->getNormalDest() ||
                PN->getParent() == II->getUnwindDest())
              return false;
  }

  unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  CallSite::arg_iterator AI = CS.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    const Type *ActTy = (*AI)->getType();

    if (!CastInst::isCastable(ActTy, ParamTy))
      return false;   // Cannot transform this parameter value.

    if (CallerPAL.getParamAttributes(i + 1)
        & Attribute::typeIncompatible(ParamTy))
      return false;   // Attribute not compatible with transformed value.

    // Converting from one pointer type to another or between a pointer and an
    // integer of the same size is safe even if we do not have a body.
    bool isConvertible = ActTy == ParamTy ||
      (TD && ((ParamTy->isPointerTy() ||
               ParamTy == TD->getIntPtrType(Caller->getContext())) &&
              (ActTy->isPointerTy() ||
               ActTy == TD->getIntPtrType(Caller->getContext()))));
    if (Callee->isDeclaration() && !isConvertible) return false;
  }

  if (FT->getNumParams() < NumActualArgs && !FT->isVarArg() &&
      Callee->isDeclaration())
    return false;   // Do not delete arguments unless we have a function body.

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty())
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them. Check that these extra arguments have attributes
    // that are compatible with being a vararg call argument.
    for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
      if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
        break;
      Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
      if (PAttrs & Attribute::VarArgsIncompatible)
        return false;
    }

  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary...
  std::vector<Value*> Args;
  Args.reserve(NumActualArgs);
  SmallVector<AttributeWithIndex, 8> attrVec;
  attrVec.reserve(NumCommonArgs);

  // Get any return attributes.
  Attributes RAttrs = CallerPAL.getRetAttributes();

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes. Wipe out any problematic attributes.
  RAttrs &= ~Attribute::typeIncompatible(NewRetTy);

  // Add the new return attributes.
  if (RAttrs)
    attrVec.push_back(AttributeWithIndex::get(0, RAttrs));

  AI = CS.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    if ((*AI)->getType() == ParamTy) {
      Args.push_back(*AI);
    } else {
      Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
          false, ParamTy, false);
      Args.push_back(Builder->CreateCast(opcode, *AI, ParamTy, "tmp"));
    }

    // Add any parameter attributes.
    if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
      attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
  }

  // If the function takes more arguments than the call was taking, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));

  // If we are removing arguments to the function, emit an obnoxious warning.
  if (FT->getNumParams() < NumActualArgs) {
    if (!FT->isVarArg()) {
      errs() << "WARNING: While resolving call to function '"
             << Callee->getName() << "' arguments were dropped!\n";
    } else {
      // Add all of the arguments in their promoted form to the arg list.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        const Type *PTy = getPromotedType((*AI)->getType());
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through va_arg area!
          Instruction::CastOps opcode =
            CastInst::getCastOpcode(*AI, false, PTy, false);
          Args.push_back(Builder->CreateCast(opcode, *AI, PTy, "tmp"));
        } else {
          Args.push_back(*AI);
        }

        // Add any parameter attributes.
        if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
          attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
      }
    }
  }

  if (Attributes FnAttrs = CallerPAL.getFnAttributes())
    attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));

  if (NewRetTy->isVoidTy())
    Caller->setName("");   // Void type should not have a name.

  const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),
                                                     attrVec.end());

  Instruction *NC;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NC = InvokeInst::Create(Callee, II->getNormalDest(), II->getUnwindDest(),
                            Args.begin(), Args.end(),
                            Caller->getName(), Caller);
    cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
    cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
  } else {
    NC = CallInst::Create(Callee, Args.begin(), Args.end(),
                          Caller->getName(), Caller);
    CallInst *CI = cast<CallInst>(Caller);
    if (CI->isTailCall())
      cast<CallInst>(NC)->setTailCall();
    cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
    cast<CallInst>(NC)->setAttributes(NewCallerPAL);
  }

  // Insert a cast of the return type as necessary.
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      Instruction::CastOps opcode = CastInst::getCastOpcode(NC, false,
                                                            OldRetTy, false);
      NV = NC = CastInst::Create(opcode, NC, OldRetTy, "tmp");

      // If this is an invoke instruction, we should insert it after the first
      // non-phi instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstNonPHI();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call; just insert the cast right after the call
        // instruction.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.AddUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }

  if (!Caller->use_empty())
    Caller->replaceAllUsesWith(NV);

  EraseInstFromFunction(*Caller);
  return true;
}

// transformCallThroughTrampoline - Turn a call to a function created by the
// init_trampoline intrinsic into a direct call to the underlying function.
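// The init_trampoline intrinsic records both the real function (operand 1)
// and the static chain value (operand 2); the rewrite below passes that chain
// explicitly in the function's 'nest' parameter.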
//
Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
  Value *Callee = CS.getCalledValue();
  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  const AttrListPtr &Attrs = CS.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return 0;

  IntrinsicInst *Tramp =
    cast<IntrinsicInst>(cast<BitCastInst>(Callee)->getOperand(0));

  Function *NestF = cast<Function>(Tramp->getOperand(1)->stripPointerCasts());
  const PointerType *NestFPTy = cast<PointerType>(NestF->getType());
  const FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());

  const AttrListPtr &NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestIdx = 1;
    const Type *NestTy = 0;
    Attributes NestAttr = Attribute::None;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
         E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
      if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = NestAttrs.getParamAttributes(NestIdx);
        break;
      }

    if (NestTy) {
      Instruction *Caller = CS.getInstruction();
      std::vector<Value*> NewArgs;
      NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);

      SmallVector<AttributeWithIndex, 8> NewAttrs;
      NewAttrs.reserve(Attrs.getNumSlots() + 1);

      // Insert the nest argument into the call argument list, which may
      // mean appending it. Likewise for attributes.

      // Add any result attributes.
      if (Attributes Attr = Attrs.getRetAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(0, Attr));

      {
        unsigned Idx = 1;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        do {
          if (Idx == NestIdx) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp->getOperand(2);
            if (NestVal->getType() != NestTy)
              NestVal = new BitCastInst(NestVal, NestTy, "nest", Caller);
            NewArgs.push_back(NestVal);
            NewAttrs.push_back(AttributeWithIndex::get(NestIdx, NestAttr));
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          if (Attributes Attr = Attrs.getParamAttributes(Idx))
            NewAttrs.push_back
              (AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr));

          ++Idx, ++I;
        } while (1);
      }

      // Add any function attributes.
      if (Attributes Attr = Attrs.getFnAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(~0, Attr));

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<const Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned Idx = 1;
        FunctionType::param_iterator I = FTy->param_begin(),
          E = FTy->param_end();

        do {
          if (Idx == NestIdx)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++Idx, ++I;
        } while (1);
      }

      // Replace the trampoline call with a direct call. Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
        NestF->getType() == PointerType::getUnqual(NewFTy) ?
        NestF : ConstantExpr::getBitCast(NestF,
                                         PointerType::getUnqual(NewFTy));
      const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),
                                                   NewAttrs.end());

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        NewCaller = InvokeInst::Create(NewCallee,
                                       II->getNormalDest(), II->getUnwindDest(),
                                       NewArgs.begin(), NewArgs.end(),
                                       Caller->getName(), Caller);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewCallee, NewArgs.begin(), NewArgs.end(),
                                     Caller->getName(), Caller);
        if (cast<CallInst>(Caller)->isTailCall())
          cast<CallInst>(NewCaller)->setTailCall();
        cast<CallInst>(NewCaller)->
          setCallingConv(cast<CallInst>(Caller)->getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }
      if (!Caller->getType()->isVoidTy())
        Caller->replaceAllUsesWith(NewCaller);
      Caller->eraseFromParent();
      Worklist.Remove(Caller);
      return 0;
    }
  }

  // Replace the trampoline call with a direct call. Since there is no 'nest'
  // parameter, there is no need to adjust the argument list. Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee =
    NestF->getType() == PTy ? NestF :
                              ConstantExpr::getBitCast(NestF, PTy);
  CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}