InstCombineCalls.cpp revision ae47be1ea023e4b1e6bbbdc4687333eea54c84c8
//===- InstCombineCalls.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall and visitInvoke functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

/// getPromotedType - Return the specified type promoted as it would be to pass
/// through a va_arg area.
static const Type *getPromotedType(const Type *Ty) {
  if (const IntegerType *ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}


Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
  unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), TD);
  unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), TD);
  unsigned MinAlign = std::min(DstAlign, SrcAlign);
  unsigned CopyAlign = MI->getAlignment();

  if (CopyAlign < MinAlign) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      MinAlign, false));
    return MI;
  }

  // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
  // load/store.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getArgOperand(2));
  if (MemOpLength == 0) return 0;

  // Source and destination pointer types are always "i8*" for the intrinsic.
  // See if the size is something we can handle with a single primitive
  // load/store.  A single load+store correctly handles overlapping memory in
  // the memmove case.
  unsigned Size = MemOpLength->getZExtValue();
  if (Size == 0) return MI;  // Delete this mem transfer.

  if (Size > 8 || (Size&(Size-1)))
    return 0;  // If not 1/2/4/8 bytes, exit.

  // Use an integer load+store unless we can find something better.
  unsigned SrcAddrSp =
    cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
    cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();

  const IntegerType *IntType = IntegerType::get(MI->getContext(), Size<<3);
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);

  // Memcpy forces the use of i8* for the source and destination.  That means
  // that if you're using memcpy to move one double around, you'll get a cast
  // from double* to i8*.  We'd much rather use a double load+store than an
  // i64 load+store here, because this improves the odds that the source or
  // dest address will be promotable.  See if we can find a better type than
  // the integer datatype.
  Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
  if (StrippedDest != MI->getArgOperand(0)) {
    const Type *SrcETy = cast<PointerType>(StrippedDest->getType())
                           ->getElementType();
    if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
      // The SrcETy might be something like {{{double}}} or [1 x double].  Rip
      // down through these levels if so.
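      // For example, {{{double}}} peels down to {{double}}, {double}, and
      // finally double, and [1 x double] peels to double, so the copy becomes
      // a double load+store instead of an i64 load+store.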
      while (!SrcETy->isSingleValueType()) {
        if (const StructType *STy = dyn_cast<StructType>(SrcETy)) {
          if (STy->getNumElements() == 1)
            SrcETy = STy->getElementType(0);
          else
            break;
        } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
          if (ATy->getNumElements() == 1)
            SrcETy = ATy->getElementType();
          else
            break;
        } else
          break;
      }

      if (SrcETy->isSingleValueType()) {
        NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp);
        NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp);
      }
    }
  }


  // If the memcpy/memmove provides better alignment info than we can
  // infer, use it.
  SrcAlign = std::max(SrcAlign, CopyAlign);
  DstAlign = std::max(DstAlign, CopyAlign);

  Value *Src = Builder->CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
  Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
  Instruction *L = new LoadInst(Src, "tmp", MI->isVolatile(), SrcAlign);
  InsertNewInstBefore(L, *MI);
  InsertNewInstBefore(new StoreInst(L, Dest, MI->isVolatile(), DstAlign),
                      *MI);

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setArgOperand(2, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}

Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
  unsigned Alignment = getKnownAlignment(MI->getDest(), TD);
  if (MI->getAlignment() < Alignment) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      Alignment, false));
    return MI;
  }

  // Extract the length and alignment and fill if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return 0;
  uint64_t Len = LenC->getZExtValue();
  Alignment = MI->getAlignment();

  // If the length is zero, this is a no-op.
  if (Len == 0) return MI;  // memset(d,c,0,a) -> noop

  // memset(s,c,n) -> store s, c   (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    const Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
    Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
    Dest = Builder->CreateBitCast(Dest, NewDstPtrTy);

    // Alignment 0 is identity for alignment 1 for memset, but not store.
    if (Alignment == 0) Alignment = 1;

    // Extract the fill value and replicate it into every byte of the store.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    InsertNewInstBefore(new StoreInst(ConstantInt::get(ITy, Fill),
                                      Dest, false, Alignment), *MI);

    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return 0;
}

/// visitCallInst - CallInst simplification.  This mostly only handles folding
/// of intrinsic instructions.  For normal calls, it allows visitCallSite to do
/// the heavy lifting.
///
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
  if (isFreeCall(&CI))
    return visitFree(CI);
  if (isMalloc(&CI))
    return visitMalloc(CI);

  // If the caller function is nounwind, mark the call as nounwind, even if the
  // callee isn't.
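  // (Unwinding out of a function marked nounwind is already undefined
  // behavior, so strengthening the call site cannot change defined behavior.)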
  if (CI.getParent()->getParent()->doesNotThrow() &&
      !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallSite(&CI);

  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue())
        return EraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations.  We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // No other transformations apply to volatile transfers.
    if (MI->isVolatile())
      return 0;

    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getParent()->getParent()->getParent();
          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
          const Type *Tys[3] = { CI.getArgOperand(0)->getType(),
                                 CI.getArgOperand(1)->getType(),
                                 CI.getArgOperand(2)->getType() };
          CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys, 3));
          Changed = true;
        }
    }

    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return EraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (isa<MemTransferInst>(MI)) {
      if (Instruction *I = SimplifyMemTransfer(MI))
        return I;
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
      if (Instruction *I = SimplifyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }

  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::objectsize: {
    // We need target data for just about everything so depend on it.
    if (!TD) break;

    const Type *ReturnTy = CI.getType();
    bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);

    // Get to the real allocated thing and offset as fast as possible.
    Value *Op1 = II->getArgOperand(0)->stripPointerCasts();

    // If we've stripped down to a single global variable that we
    // can know the size of then just return that.
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1)) {
      if (GV->hasDefinitiveInitializer()) {
        Constant *C = GV->getInitializer();
        uint64_t GlobalSize = TD->getTypeAllocSize(C->getType());
        return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, GlobalSize));
      } else {
        // Can't determine size of the GV.
        Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
        return ReplaceInstUsesWith(CI, RetVal);
      }
    } else if (AllocaInst *AI = dyn_cast<AllocaInst>(Op1)) {
      // Get alloca size.
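      // Note that for an array alloca such as 'alloca i32, i32 %n', the total
      // size is only computable when %n is a ConstantInt; otherwise we bail
      // out below.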
      if (AI->getAllocatedType()->isSized()) {
        uint64_t AllocaSize = TD->getTypeAllocSize(AI->getAllocatedType());
        if (AI->isArrayAllocation()) {
          const ConstantInt *C = dyn_cast<ConstantInt>(AI->getArraySize());
          if (!C) break;
          AllocaSize *= C->getZExtValue();
        }
        return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, AllocaSize));
      }
    } else if (CallInst *MI = extractMallocCall(Op1)) {
      const Type *MallocType = getMallocAllocatedType(MI);
      // Get the malloc'd size.
      if (MallocType && MallocType->isSized()) {
        if (Value *NElems = getMallocArraySize(MI, TD, true)) {
          if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
            return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy,
                (NElements->getZExtValue() * TD->getTypeAllocSize(MallocType))));
        }
      }
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op1)) {
      // Only handle constant GEPs here.
      if (CE->getOpcode() != Instruction::GetElementPtr) break;
      GEPOperator *GEP = cast<GEPOperator>(CE);

      // Make sure we're not a constant offset from an external
      // global.
      Value *Operand = GEP->getPointerOperand();
      Operand = Operand->stripPointerCasts();
      if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Operand))
        if (!GV->hasDefinitiveInitializer()) break;

      // Get what we're pointing to and its size.
      const PointerType *BaseType =
        cast<PointerType>(Operand->getType());
      uint64_t Size = TD->getTypeAllocSize(BaseType->getElementType());

      // Get the current byte offset into the thing.  Use the original
      // operand in case we're looking through a bitcast.
      SmallVector<Value*, 8> Ops(CE->op_begin()+1, CE->op_end());
      const PointerType *OffsetType =
        cast<PointerType>(GEP->getPointerOperand()->getType());
      uint64_t Offset = TD->getIndexedOffset(OffsetType, &Ops[0], Ops.size());

      if (Size < Offset) {
        // Out-of-bounds reference?  Negative index normalized to a large
        // index?  Just return "I don't know".
        Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
        return ReplaceInstUsesWith(CI, RetVal);
      }

      Constant *RetVal = ConstantInt::get(ReturnTy, Size-Offset);
      return ReplaceInstUsesWith(CI, RetVal);
    }

    // Do not return "I don't know" here.  Later optimization passes could
    // make it possible to evaluate objectsize to a constant.
    break;
  }
  case Intrinsic::bswap:
    // bswap(bswap(x)) -> x
    if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getArgOperand(0)))
      if (Operand->getIntrinsicID() == Intrinsic::bswap)
        return ReplaceInstUsesWith(CI, Operand->getArgOperand(0));

    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
    if (TruncInst *TI = dyn_cast<TruncInst>(II->getArgOperand(0))) {
      if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(TI->getOperand(0)))
        if (Operand->getIntrinsicID() == Intrinsic::bswap) {
          unsigned C = Operand->getType()->getPrimitiveSizeInBits() -
                       TI->getType()->getPrimitiveSizeInBits();
          Value *CV = ConstantInt::get(Operand->getType(), C);
          Value *V = Builder->CreateLShr(Operand->getArgOperand(0), CV);
          return new TruncInst(V, TI->getType());
        }
    }

    break;
  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
      // powi(x, 1) -> x
      if (Power->isOne())
        return ReplaceInstUsesWith(CI, II->getArgOperand(0));
      // powi(x, -1) -> 1/x
      if (Power->isAllOnesValue())
        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
                                          II->getArgOperand(0));
    }
    break;
  case Intrinsic::cttz: {
    // If all bits below the first known one are known zero,
    // this value is constant.
    const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getArgOperand(0), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned TrailingZeros = KnownOne.countTrailingZeros();
    APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, TrailingZeros)));

  }
  break;
  case Intrinsic::ctlz: {
    // If all bits above the first known one are known zero,
    // this value is constant.
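    // For example, on an i32 whose bit 7 is known one and whose bits 8..31
    // are known zero, ctlz must evaluate to exactly 24.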
    const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getArgOperand(0), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned LeadingZeros = KnownOne.countLeadingZeros();
    APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, LeadingZeros)));

  }
  break;
  case Intrinsic::uadd_with_overflow: {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt Mask = APInt::getSignBit(BitWidth);
    APInt LHSKnownZero(BitWidth, 0);
    APInt LHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
    bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
    bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];

    if (LHSKnownNegative || LHSKnownPositive) {
      APInt RHSKnownZero(BitWidth, 0);
      APInt RHSKnownOne(BitWidth, 0);
      ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
      bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
      bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
      if (LHSKnownNegative && RHSKnownNegative) {
        // The sign bit is set in both cases: this MUST overflow.
        // Create a simple add instruction, and insert it into the struct.
        Instruction *Add = BinaryOperator::CreateAdd(LHS, RHS, "", &CI);
        Worklist.Add(Add);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getTrue(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, Add, 0);
      }

      if (LHSKnownPositive && RHSKnownPositive) {
        // The sign bit is clear in both cases: this CANNOT overflow.
        // Create a simple add instruction, and insert it into the struct.
        Instruction *Add = BinaryOperator::CreateNUWAdd(LHS, RHS, "", &CI);
        Worklist.Add(Add);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, Add, 0);
      }
    }
  }
  // FALL THROUGH uadd into sadd
  case Intrinsic::sadd_with_overflow:
    // Canonicalize constants into the RHS.
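    // e.g. sadd.with.overflow(C, %x) becomes sadd.with.overflow(%x, C), so
    // the folds below only need to look for a constant on the RHS.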
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X + undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X + 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // undef - X -> undef
    // X - undef -> undef
    if (isa<UndefValue>(II->getArgOperand(0)) ||
        isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X - 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X * undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X*0 -> {0, false}
      if (RHSI->isZero())
        return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));

      // X * 1 -> {X, false}
      if (RHSI->equalsInt(1)) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::x86_sse_loadu_ps:
  case Intrinsic::x86_sse2_loadu_pd:
  case Intrinsic::x86_sse2_loadu_dq:
    // Turn PPC lvx -> load if the pointer is known aligned.
    // Turn X86 loadups -> load if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                          PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, TD) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(0)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
      return new StoreInst(II->getArgOperand(0), Ptr);
    }
    break;
  case Intrinsic::x86_sse_storeu_ps:
  case Intrinsic::x86_sse2_storeu_pd:
  case Intrinsic::x86_sse2_storeu_dq:
    // Turn X86 storeu -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(1)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
      return new StoreInst(II->getArgOperand(1), Ptr);
    }
    break;

  case Intrinsic::x86_sse_cvttss2si: {
    // This intrinsic only demands the 0th element of its input vector.  If
    // we can simplify the input based on that, do so now.
    unsigned VWidth =
      cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
    APInt DemandedElts(VWidth, 1);
    APInt UndefElts(VWidth, 0);
    if (Value *V = SimplifyDemandedVectorElts(II->getArgOperand(0),
                                              DemandedElts, UndefElts)) {
      II->setArgOperand(0, V);
      return II;
    }
    break;
  }

  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getArgOperand(2))) {
      assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        if (!isa<ConstantInt>(Mask->getOperand(i)) &&
            !isa<UndefValue>(Mask->getOperand(i))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0),
                                            Mask->getType());
        Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1),
                                            Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getOperand(i)))
            continue;
          unsigned Idx = cast<ConstantInt>(Mask->getOperand(i))->getZExtValue();
          Idx &= 31;  // Match the hardware behavior.

          if (ExtractedElts[Idx] == 0) {
            ExtractedElts[Idx] =
              Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
                ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                 Idx&15, false), "tmp");
          }

          // Insert this value into the result vector.
          Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
                     ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                      i, false), "tmp");
        }
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
      }
    }
    break;

  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane:
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), TD);
    unsigned AlignArg = II->getNumArgOperands() - 1;
    ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
    if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
      II->setArgOperand(AlignArg,
                        ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                         MemAlign, false));
      return II;
    }
    break;
  }

  case Intrinsic::stackrestore: {
    // If the save is right next to the restore, remove the restore.  This can
    // happen when variable allocas are DCE'd.
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        BasicBlock::iterator BI = SS;
        if (&*++BI == II)
          return EraseInstFromFunction(CI);
      }
    }

    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
    BasicBlock::iterator BI = II;
    TerminatorInst *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      if (isa<AllocaInst>(BI) || isMalloc(BI)) {
        CannotRemove = true;
        break;
      }
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
          // If there is a stackrestore below this one, remove this one.
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return EraseInstFromFunction(CI);
          // Otherwise, ignore the intrinsic.
        } else {
          // If we found a non-intrinsic call, we can't remove the stack
          // restore.
          CannotRemove = true;
          break;
        }
      }
    }

    // If the stack restore is in a return/unwind block and if there are no
    // allocas or calls between the restore and the return, nuke the restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<UnwindInst>(TI)))
      return EraseInstFromFunction(CI);
    break;
  }
  }

  return visitCallSite(II);
}

// InvokeInst simplification
//
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}

/// isSafeToEliminateVarargsCast - If this cast does not affect the value
/// passed through the varargs area, we can eliminate the use of the cast.
static bool isSafeToEliminateVarargsCast(const CallSite CS,
                                         const CastInst * const CI,
                                         const TargetData * const TD,
                                         const int ix) {
  if (!CI->isLosslessCast())
    return false;

  // The size of ByVal arguments is derived from the type, so we
  // can't change to a type with a different size.  If the size were
  // passed explicitly we could avoid this check.
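  // (A lossless cast on a non-byval vararg never changes the bits that are
  // passed, so it is always removable; byval arguments additionally need the
  // size checks below.)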
  if (!CS.paramHasAttr(ix, Attribute::ByVal))
    return true;

  const Type *SrcTy =
    cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  const Type *DstTy = cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
    return false;
  return true;
}

namespace {
class InstCombineFortifiedLibCalls : public SimplifyFortifiedLibCalls {
  InstCombiner *IC;
protected:
  void replaceCall(Value *With) {
    NewInstruction = IC->ReplaceInstUsesWith(*CI, With);
  }
  bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp, bool isString) const {
    if (ConstantInt *SizeCI =
            dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp))) {
      if (SizeCI->isAllOnesValue())
        return true;
      if (isString)
        return SizeCI->getZExtValue() >=
               GetStringLength(CI->getArgOperand(SizeArgOp));
      if (ConstantInt *Arg = dyn_cast<ConstantInt>(
              CI->getArgOperand(SizeArgOp)))
        return SizeCI->getZExtValue() >= Arg->getZExtValue();
    }
    return false;
  }
public:
  InstCombineFortifiedLibCalls(InstCombiner *IC) : IC(IC), NewInstruction(0) { }
  Instruction *NewInstruction;
};
} // end anonymous namespace

// Try to fold some different types of calls here.
// Currently we're only working with the checking functions: memcpy_chk,
// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
// strcat_chk and strncat_chk.
Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const TargetData *TD) {
  if (CI->getCalledFunction() == 0) return 0;

  InstCombineFortifiedLibCalls Simplifier(this);
  Simplifier.fold(CI, TD);
  return Simplifier.NewInstruction;
}

// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
  bool Changed = false;

  // If the callee is a pointer to a function, attempt to move any casts to the
  // arguments of the call/invoke.
  Value *Callee = CS.getCalledValue();
  if (!isa<Function>(Callee) && transformConstExprCastCall(CS))
    return 0;

  if (Function *CalleeF = dyn_cast<Function>(Callee))
    // If the call and callee calling conventions don't match, this call must
    // be unreachable, as the call is undefined.
    if (CalleeF->getCallingConv() != CS.getCallingConv() &&
        // Only do this for calls to a function with a body.  A prototype may
        // not actually end up matching the implementation's calling conv for a
        // variety of reasons (e.g. it may be written in assembly).
        !CalleeF->isDeclaration()) {
      Instruction *OldCall = CS.getInstruction();
      new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                    UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                    OldCall);
      // If OldCall does not return void then replaceAllUsesWith undef.
      // This allows ValueHandlers and custom metadata to adjust themselves.
      if (!OldCall->getType()->isVoidTy())
        OldCall->replaceAllUsesWith(UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))
        return EraseInstFromFunction(*OldCall);

      // We cannot remove an invoke, because it would change the CFG; just
      // change the callee to a null pointer.
      cast<InvokeInst>(OldCall)->setCalledFunction(
                                   Constant::getNullValue(CalleeF->getType()));
      return 0;
    }

  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
    // This instruction is not reachable, just remove it.  We insert a store to
    // undef so that we know that this code is not reachable, despite the fact
    // that we can't modify the CFG here.
    new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                  UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                  CS.getInstruction());

    // If CS does not return void then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust themselves.
    if (!CS.getInstruction()->getType()->isVoidTy())
      CS.getInstruction()->
        replaceAllUsesWith(UndefValue::get(CS.getInstruction()->getType()));

    if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      // Don't break the CFG, insert a dummy cond branch.
      BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
                         ConstantInt::getTrue(Callee->getContext()), II);
    }
    return EraseInstFromFunction(*CS.getInstruction());
  }

  if (BitCastInst *BC = dyn_cast<BitCastInst>(Callee))
    if (IntrinsicInst *In = dyn_cast<IntrinsicInst>(BC->getOperand(0)))
      if (In->getIntrinsicID() == Intrinsic::init_trampoline)
        return transformCallThroughTrampoline(CS);

  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams() + (isa<InvokeInst>(Callee) ? 3 : 1);
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
    for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
           E = CS.arg_end(); I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }

  // Try to optimize the call if possible; we require TargetData for most of
  // this.  None of these calls are seen as possibly dead so go ahead and
  // delete the instruction now.
  if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
    Instruction *I = tryOptimizeCall(CI, TD);
    // If we changed something, return the result; otherwise let the
    // fall-through checks run.
    if (I) return EraseInstFromFunction(*I);
  }

  return Changed ? CS.getInstruction() : 0;
}

// transformConstExprCastCall - If the callee is a constexpr cast of a function,
// attempt to move the cast to the arguments of the call/invoke.
//
bool InstCombiner::transformConstExprCastCall(CallSite CS) {
  Function *Callee =
    dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
  if (Callee == 0)
    return false;
  Instruction *Caller = CS.getInstruction();
  const AttrListPtr &CallerPAL = CS.getAttributes();

  // Okay, this is a cast from a function to a different type.  Unless doing so
  // would cause a type conversion of one of our arguments, change this call to
  // be a direct call with arguments casted to the appropriate types.
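  // For example, '%r = call i32 bitcast (i32 (i8*)* @f to i32 (i32*)*)(i32* %p)'
  // becomes '%c = bitcast i32* %p to i8*' followed by '%r = call i32 @f(i8* %c)'.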
  //
  const FunctionType *FT = Callee->getFunctionType();
  const Type *OldRetTy = Caller->getType();
  const Type *NewRetTy = FT->getReturnType();

  if (NewRetTy->isStructTy())
    return false; // TODO: Handle multiple return values.

  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {
    if (Callee->isDeclaration() &&
        // Conversion is ok if changing from one pointer type to another or
        // from a pointer to an integer of the same size.
        !((OldRetTy->isPointerTy() || !TD ||
           OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
          (NewRetTy->isPointerTy() || !TD ||
           NewRetTy == TD->getIntPtrType(Caller->getContext()))))
      return false;   // Cannot transform this return value.

    if (!Caller->use_empty() &&
        // void -> non-void is handled specially
        !NewRetTy->isVoidTy() && !CastInst::isCastable(NewRetTy, OldRetTy))
      return false;   // Cannot transform this return value.

    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
      Attributes RAttrs = CallerPAL.getRetAttributes();
      if (RAttrs & Attribute::typeIncompatible(NewRetTy))
        return false;   // Attribute not compatible with transformed value.
    }

    // If the callsite is an invoke instruction, and the return value is used
    // by a PHI node in a successor, we cannot change the return type of the
    // call because there is no place to put the cast instruction (without
    // breaking the critical edge).  Bail out in this case.
    if (!Caller->use_empty())
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
        for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
             UI != E; ++UI)
          if (PHINode *PN = dyn_cast<PHINode>(*UI))
            if (PN->getParent() == II->getNormalDest() ||
                PN->getParent() == II->getUnwindDest())
              return false;
  }

  unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  CallSite::arg_iterator AI = CS.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    const Type *ActTy = (*AI)->getType();

    if (!CastInst::isCastable(ActTy, ParamTy))
      return false;   // Cannot transform this parameter value.

    unsigned Attrs = CallerPAL.getParamAttributes(i + 1);
    if (Attrs & Attribute::typeIncompatible(ParamTy))
      return false;   // Attribute not compatible with transformed value.

    // If the parameter is passed as a byval argument, then we have to have a
    // sized type and the sized type has to have the same size as the old type.
    if (ParamTy != ActTy && (Attrs & Attribute::ByVal)) {
      const PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
      if (ParamPTy == 0 || !ParamPTy->getElementType()->isSized() || TD == 0)
        return false;

      const Type *CurElTy = cast<PointerType>(ActTy)->getElementType();
      if (TD->getTypeAllocSize(CurElTy) !=
          TD->getTypeAllocSize(ParamPTy->getElementType()))
        return false;
    }

    // Converting from one pointer type to another or between a pointer and an
    // integer of the same size is safe even if we do not have a body.
    bool isConvertible = ActTy == ParamTy ||
      (TD && ((ParamTy->isPointerTy() ||
               ParamTy == TD->getIntPtrType(Caller->getContext())) &&
              (ActTy->isPointerTy() ||
               ActTy == TD->getIntPtrType(Caller->getContext()))));
    if (Callee->isDeclaration() && !isConvertible) return false;
  }

  if (FT->getNumParams() < NumActualArgs && !FT->isVarArg() &&
      Callee->isDeclaration())
    return false;   // Do not delete arguments unless we have a function body.

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty())
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them.  Check that these extra arguments have
    // attributes that are compatible with being a vararg call argument.
    for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
      if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
        break;
      Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
      if (PAttrs & Attribute::VarArgsIncompatible)
        return false;
    }

  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary...
  std::vector<Value*> Args;
  Args.reserve(NumActualArgs);
  SmallVector<AttributeWithIndex, 8> attrVec;
  attrVec.reserve(NumCommonArgs);

  // Get any return attributes.
  Attributes RAttrs = CallerPAL.getRetAttributes();

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes.  Wipe out any problematic attributes.
  RAttrs &= ~Attribute::typeIncompatible(NewRetTy);

  // Add the new return attributes.
  if (RAttrs)
    attrVec.push_back(AttributeWithIndex::get(0, RAttrs));

  AI = CS.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    if ((*AI)->getType() == ParamTy) {
      Args.push_back(*AI);
    } else {
      Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
          false, ParamTy, false);
      Args.push_back(Builder->CreateCast(opcode, *AI, ParamTy, "tmp"));
    }

    // Add any parameter attributes.
    if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
      attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
  }

  // If the function takes more arguments than the call was taking, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));

  // If we are removing arguments to the function, emit an obnoxious warning.
  if (FT->getNumParams() < NumActualArgs) {
    if (!FT->isVarArg()) {
      errs() << "WARNING: While resolving call to function '"
             << Callee->getName() << "' arguments were dropped!\n";
    } else {
      // Add all of the arguments in their promoted form to the arg list.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        const Type *PTy = getPromotedType((*AI)->getType());
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through va_arg area!
          Instruction::CastOps opcode =
            CastInst::getCastOpcode(*AI, false, PTy, false);
          Args.push_back(Builder->CreateCast(opcode, *AI, PTy, "tmp"));
        } else {
          Args.push_back(*AI);
        }

        // Add any parameter attributes.
        if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
          attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
      }
    }
  }

  if (Attributes FnAttrs = CallerPAL.getFnAttributes())
    attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));

  if (NewRetTy->isVoidTy())
    Caller->setName("");   // Void type should not have a name.

  const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),
                                                     attrVec.end());

  Instruction *NC;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NC = InvokeInst::Create(Callee, II->getNormalDest(), II->getUnwindDest(),
                            Args.begin(), Args.end(),
                            Caller->getName(), Caller);
    cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
    cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
  } else {
    NC = CallInst::Create(Callee, Args.begin(), Args.end(),
                          Caller->getName(), Caller);
    CallInst *CI = cast<CallInst>(Caller);
    if (CI->isTailCall())
      cast<CallInst>(NC)->setTailCall();
    cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
    cast<CallInst>(NC)->setAttributes(NewCallerPAL);
  }

  // Insert a cast of the return type as necessary.
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      Instruction::CastOps opcode =
        CastInst::getCastOpcode(NC, false, OldRetTy, false);
      NV = NC = CastInst::Create(opcode, NC, OldRetTy, "tmp");

      // If this is an invoke instruction, we should insert it after the first
      // non-phi instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstNonPHI();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call; just insert the cast right after the call.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.AddUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }

  if (!Caller->use_empty())
    Caller->replaceAllUsesWith(NV);

  EraseInstFromFunction(*Caller);
  return true;
}

// transformCallThroughTrampoline - Turn a call to a function created by the
// init_trampoline intrinsic into a direct call to the underlying function.
//
Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
  Value *Callee = CS.getCalledValue();
  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  const AttrListPtr &Attrs = CS.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return 0;

  IntrinsicInst *Tramp =
    cast<IntrinsicInst>(cast<BitCastInst>(Callee)->getOperand(0));

  Function *NestF =
    cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
  const PointerType *NestFPTy = cast<PointerType>(NestF->getType());
  const FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());

  const AttrListPtr &NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestIdx = 1;
    const Type *NestTy = 0;
    Attributes NestAttr = Attribute::None;

    // Look for a parameter marked with the 'nest' attribute.
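    // (This is the parameter that receives the static chain value recorded
    // by the init_trampoline intrinsic; see Tramp->getArgOperand(2) below.)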
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
         E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
      if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = NestAttrs.getParamAttributes(NestIdx);
        break;
      }

    if (NestTy) {
      Instruction *Caller = CS.getInstruction();
      std::vector<Value*> NewArgs;
      NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);

      SmallVector<AttributeWithIndex, 8> NewAttrs;
      NewAttrs.reserve(Attrs.getNumSlots() + 1);

      // Insert the nest argument into the call argument list, which may
      // mean appending it.  Likewise for attributes.

      // Add any result attributes.
      if (Attributes Attr = Attrs.getRetAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(0, Attr));

      {
        unsigned Idx = 1;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        do {
          if (Idx == NestIdx) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp->getArgOperand(2);
            if (NestVal->getType() != NestTy)
              NestVal = new BitCastInst(NestVal, NestTy, "nest", Caller);
            NewArgs.push_back(NestVal);
            NewAttrs.push_back(AttributeWithIndex::get(NestIdx, NestAttr));
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          if (Attributes Attr = Attrs.getParamAttributes(Idx))
            NewAttrs.push_back(
              AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr));

          ++Idx, ++I;
        } while (1);
      }

      // Add any function attributes.
      if (Attributes Attr = Attrs.getFnAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(~0, Attr));

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<const Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned Idx = 1;
        FunctionType::param_iterator I = FTy->param_begin(),
          E = FTy->param_end();

        do {
          if (Idx == NestIdx)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++Idx, ++I;
        } while (1);
      }

      // Replace the trampoline call with a direct call.  Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
        NestF->getType() == PointerType::getUnqual(NewFTy) ?
        NestF : ConstantExpr::getBitCast(NestF,
                                         PointerType::getUnqual(NewFTy));
      const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),
                                                   NewAttrs.end());

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        NewCaller = InvokeInst::Create(NewCallee,
                                       II->getNormalDest(), II->getUnwindDest(),
                                       NewArgs.begin(), NewArgs.end(),
                                       Caller->getName(), Caller);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewCallee, NewArgs.begin(), NewArgs.end(),
                                     Caller->getName(), Caller);
        if (cast<CallInst>(Caller)->isTailCall())
          cast<CallInst>(NewCaller)->setTailCall();
        cast<CallInst>(NewCaller)->
          setCallingConv(cast<CallInst>(Caller)->getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }
      if (!Caller->getType()->isVoidTy())
        Caller->replaceAllUsesWith(NewCaller);
      Caller->eraseFromParent();
      Worklist.Remove(Caller);
      return 0;
    }
  }

  // Replace the trampoline call with a direct call.  Since there is no 'nest'
  // parameter, there is no need to adjust the argument list.  Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee =
    NestF->getType() == PTy ? NestF :
                              ConstantExpr::getBitCast(NestF, PTy);
  CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}