InstCombineCalls.cpp revision 949124ce0fcaa2428adb95a729ccf8727054f294
1//===- InstCombineCalls.cpp -----------------------------------------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file implements the visitCall and visitInvoke functions. 11// 12//===----------------------------------------------------------------------===// 13 14#include "InstCombine.h" 15#include "llvm/IntrinsicInst.h" 16#include "llvm/Support/CallSite.h" 17#include "llvm/Target/TargetData.h" 18#include "llvm/Analysis/MemoryBuiltins.h" 19using namespace llvm; 20 21/// getPromotedType - Return the specified type promoted as it would be to pass 22/// though a va_arg area. 23static const Type *getPromotedType(const Type *Ty) { 24 if (const IntegerType* ITy = dyn_cast<IntegerType>(Ty)) { 25 if (ITy->getBitWidth() < 32) 26 return Type::getInt32Ty(Ty->getContext()); 27 } 28 return Ty; 29} 30 31/// EnforceKnownAlignment - If the specified pointer points to an object that 32/// we control, modify the object's alignment to PrefAlign. This isn't 33/// often possible though. If alignment is important, a more reliable approach 34/// is to simply align all global variables and allocation instructions to 35/// their preferred alignment from the beginning. 36/// 37static unsigned EnforceKnownAlignment(Value *V, 38 unsigned Align, unsigned PrefAlign) { 39 40 User *U = dyn_cast<User>(V); 41 if (!U) return Align; 42 43 switch (Operator::getOpcode(U)) { 44 default: break; 45 case Instruction::BitCast: 46 return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign); 47 case Instruction::GetElementPtr: { 48 // If all indexes are zero, it is just the alignment of the base pointer. 49 bool AllZeroOperands = true; 50 for (User::op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e; ++i) 51 if (!isa<Constant>(*i) || 52 !cast<Constant>(*i)->isNullValue()) { 53 AllZeroOperands = false; 54 break; 55 } 56 57 if (AllZeroOperands) { 58 // Treat this like a bitcast. 59 return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign); 60 } 61 break; 62 } 63 } 64 65 if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) { 66 // If there is a large requested alignment and we can, bump up the alignment 67 // of the global. 68 if (!GV->isDeclaration()) { 69 if (GV->getAlignment() >= PrefAlign) 70 Align = GV->getAlignment(); 71 else { 72 GV->setAlignment(PrefAlign); 73 Align = PrefAlign; 74 } 75 } 76 } else if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) { 77 // If there is a requested alignment and if this is an alloca, round up. 78 if (AI->getAlignment() >= PrefAlign) 79 Align = AI->getAlignment(); 80 else { 81 AI->setAlignment(PrefAlign); 82 Align = PrefAlign; 83 } 84 } 85 86 return Align; 87} 88 89/// GetOrEnforceKnownAlignment - If the specified pointer has an alignment that 90/// we can determine, return it, otherwise return 0. If PrefAlign is specified, 91/// and it is more than the alignment of the ultimate object, see if we can 92/// increase the alignment of the ultimate object, making this check succeed. 93unsigned InstCombiner::GetOrEnforceKnownAlignment(Value *V, 94 unsigned PrefAlign) { 95 unsigned BitWidth = TD ? TD->getTypeSizeInBits(V->getType()) : 96 sizeof(PrefAlign) * CHAR_BIT; 97 APInt Mask = APInt::getAllOnesValue(BitWidth); 98 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0); 99 ComputeMaskedBits(V, Mask, KnownZero, KnownOne); 100 unsigned TrailZ = KnownZero.countTrailingOnes(); 101 unsigned Align = 1u << std::min(BitWidth - 1, TrailZ); 102 103 if (PrefAlign > Align) 104 Align = EnforceKnownAlignment(V, Align, PrefAlign); 105 106 // We don't need to make any adjustment. 107 return Align; 108} 109 110Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) { 111 unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getOperand(1)); 112 unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getOperand(2)); 113 unsigned MinAlign = std::min(DstAlign, SrcAlign); 114 unsigned CopyAlign = MI->getAlignment(); 115 116 if (CopyAlign < MinAlign) { 117 MI->setAlignment(ConstantInt::get(MI->getAlignmentType(), 118 MinAlign, false)); 119 return MI; 120 } 121 122 // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with 123 // load/store. 124 ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getOperand(3)); 125 if (MemOpLength == 0) return 0; 126 127 // Source and destination pointer types are always "i8*" for intrinsic. See 128 // if the size is something we can handle with a single primitive load/store. 129 // A single load+store correctly handles overlapping memory in the memmove 130 // case. 131 unsigned Size = MemOpLength->getZExtValue(); 132 if (Size == 0) return MI; // Delete this mem transfer. 133 134 if (Size > 8 || (Size&(Size-1))) 135 return 0; // If not 1/2/4/8 bytes, exit. 136 137 // Use an integer load+store unless we can find something better. 138 Type *NewPtrTy = 139 PointerType::getUnqual(IntegerType::get(MI->getContext(), Size<<3)); 140 141 // Memcpy forces the use of i8* for the source and destination. That means 142 // that if you're using memcpy to move one double around, you'll get a cast 143 // from double* to i8*. We'd much rather use a double load+store rather than 144 // an i64 load+store, here because this improves the odds that the source or 145 // dest address will be promotable. See if we can find a better type than the 146 // integer datatype. 147 Value *StrippedDest = MI->getOperand(1)->stripPointerCasts(); 148 if (StrippedDest != MI->getOperand(1)) { 149 const Type *SrcETy = cast<PointerType>(StrippedDest->getType()) 150 ->getElementType(); 151 if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) { 152 // The SrcETy might be something like {{{double}}} or [1 x double]. Rip 153 // down through these levels if so. 154 while (!SrcETy->isSingleValueType()) { 155 if (const StructType *STy = dyn_cast<StructType>(SrcETy)) { 156 if (STy->getNumElements() == 1) 157 SrcETy = STy->getElementType(0); 158 else 159 break; 160 } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) { 161 if (ATy->getNumElements() == 1) 162 SrcETy = ATy->getElementType(); 163 else 164 break; 165 } else 166 break; 167 } 168 169 if (SrcETy->isSingleValueType()) 170 NewPtrTy = PointerType::getUnqual(SrcETy); 171 } 172 } 173 174 175 // If the memcpy/memmove provides better alignment info than we can 176 // infer, use it. 177 SrcAlign = std::max(SrcAlign, CopyAlign); 178 DstAlign = std::max(DstAlign, CopyAlign); 179 180 Value *Src = Builder->CreateBitCast(MI->getOperand(2), NewPtrTy); 181 Value *Dest = Builder->CreateBitCast(MI->getOperand(1), NewPtrTy); 182 Instruction *L = new LoadInst(Src, "tmp", false, SrcAlign); 183 InsertNewInstBefore(L, *MI); 184 InsertNewInstBefore(new StoreInst(L, Dest, false, DstAlign), *MI); 185 186 // Set the size of the copy to 0, it will be deleted on the next iteration. 187 MI->setOperand(3, Constant::getNullValue(MemOpLength->getType())); 188 return MI; 189} 190 191Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) { 192 unsigned Alignment = GetOrEnforceKnownAlignment(MI->getDest()); 193 if (MI->getAlignment() < Alignment) { 194 MI->setAlignment(ConstantInt::get(MI->getAlignmentType(), 195 Alignment, false)); 196 return MI; 197 } 198 199 // Extract the length and alignment and fill if they are constant. 200 ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength()); 201 ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue()); 202 if (!LenC || !FillC || !FillC->getType()->isInteger(8)) 203 return 0; 204 uint64_t Len = LenC->getZExtValue(); 205 Alignment = MI->getAlignment(); 206 207 // If the length is zero, this is a no-op 208 if (Len == 0) return MI; // memset(d,c,0,a) -> noop 209 210 // memset(s,c,n) -> store s, c (for n=1,2,4,8) 211 if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) { 212 const Type *ITy = IntegerType::get(MI->getContext(), Len*8); // n=1 -> i8. 213 214 Value *Dest = MI->getDest(); 215 Dest = Builder->CreateBitCast(Dest, PointerType::getUnqual(ITy)); 216 217 // Alignment 0 is identity for alignment 1 for memset, but not store. 218 if (Alignment == 0) Alignment = 1; 219 220 // Extract the fill value and store. 221 uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL; 222 InsertNewInstBefore(new StoreInst(ConstantInt::get(ITy, Fill), 223 Dest, false, Alignment), *MI); 224 225 // Set the size of the copy to 0, it will be deleted on the next iteration. 226 MI->setLength(Constant::getNullValue(LenC->getType())); 227 return MI; 228 } 229 230 return 0; 231} 232 233 234/// visitCallInst - CallInst simplification. This mostly only handles folding 235/// of intrinsic instructions. For normal calls, it allows visitCallSite to do 236/// the heavy lifting. 237/// 238Instruction *InstCombiner::visitCallInst(CallInst &CI) { 239 if (isFreeCall(&CI)) 240 return visitFree(CI); 241 242 // If the caller function is nounwind, mark the call as nounwind, even if the 243 // callee isn't. 244 if (CI.getParent()->getParent()->doesNotThrow() && 245 !CI.doesNotThrow()) { 246 CI.setDoesNotThrow(); 247 return &CI; 248 } 249 250 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI); 251 if (!II) return visitCallSite(&CI); 252 253 // Intrinsics cannot occur in an invoke, so handle them here instead of in 254 // visitCallSite. 255 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) { 256 bool Changed = false; 257 258 // memmove/cpy/set of zero bytes is a noop. 259 if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) { 260 if (NumBytes->isNullValue()) return EraseInstFromFunction(CI); 261 262 if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes)) 263 if (CI->getZExtValue() == 1) { 264 // Replace the instruction with just byte operations. We would 265 // transform other cases to loads/stores, but we don't know if 266 // alignment is sufficient. 267 } 268 } 269 270 // If we have a memmove and the source operation is a constant global, 271 // then the source and dest pointers can't alias, so we can change this 272 // into a call to memcpy. 273 if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) { 274 if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource())) 275 if (GVSrc->isConstant()) { 276 Module *M = CI.getParent()->getParent()->getParent(); 277 Intrinsic::ID MemCpyID = Intrinsic::memcpy; 278 const Type *Tys[1]; 279 Tys[0] = CI.getOperand(3)->getType(); 280 CI.setOperand(0, 281 Intrinsic::getDeclaration(M, MemCpyID, Tys, 1)); 282 Changed = true; 283 } 284 } 285 286 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { 287 // memmove(x,x,size) -> noop. 288 if (MTI->getSource() == MTI->getDest()) 289 return EraseInstFromFunction(CI); 290 } 291 292 // If we can determine a pointer alignment that is bigger than currently 293 // set, update the alignment. 294 if (isa<MemTransferInst>(MI)) { 295 if (Instruction *I = SimplifyMemTransfer(MI)) 296 return I; 297 } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) { 298 if (Instruction *I = SimplifyMemSet(MSI)) 299 return I; 300 } 301 302 if (Changed) return II; 303 } 304 305 switch (II->getIntrinsicID()) { 306 default: break; 307 case Intrinsic::bswap: 308 // bswap(bswap(x)) -> x 309 if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getOperand(1))) 310 if (Operand->getIntrinsicID() == Intrinsic::bswap) 311 return ReplaceInstUsesWith(CI, Operand->getOperand(1)); 312 313 // bswap(trunc(bswap(x))) -> trunc(lshr(x, c)) 314 if (TruncInst *TI = dyn_cast<TruncInst>(II->getOperand(1))) { 315 if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(TI->getOperand(0))) 316 if (Operand->getIntrinsicID() == Intrinsic::bswap) { 317 unsigned C = Operand->getType()->getPrimitiveSizeInBits() - 318 TI->getType()->getPrimitiveSizeInBits(); 319 Value *CV = ConstantInt::get(Operand->getType(), C); 320 Value *V = Builder->CreateLShr(Operand->getOperand(1), CV); 321 return new TruncInst(V, TI->getType()); 322 } 323 } 324 325 break; 326 case Intrinsic::powi: 327 if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getOperand(2))) { 328 // powi(x, 0) -> 1.0 329 if (Power->isZero()) 330 return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0)); 331 // powi(x, 1) -> x 332 if (Power->isOne()) 333 return ReplaceInstUsesWith(CI, II->getOperand(1)); 334 // powi(x, -1) -> 1/x 335 if (Power->isAllOnesValue()) 336 return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0), 337 II->getOperand(1)); 338 } 339 break; 340 case Intrinsic::cttz: { 341 // If all bits below the first known one are known zero, 342 // this value is constant. 343 const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType()); 344 uint32_t BitWidth = IT->getBitWidth(); 345 APInt KnownZero(BitWidth, 0); 346 APInt KnownOne(BitWidth, 0); 347 ComputeMaskedBits(II->getOperand(1), APInt::getAllOnesValue(BitWidth), 348 KnownZero, KnownOne); 349 unsigned TrailingZeros = KnownOne.countTrailingZeros(); 350 APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros)); 351 if ((Mask & KnownZero) == Mask) 352 return ReplaceInstUsesWith(CI, ConstantInt::get(IT, 353 APInt(BitWidth, TrailingZeros))); 354 355 } 356 break; 357 case Intrinsic::ctlz: { 358 // If all bits above the first known one are known zero, 359 // this value is constant. 360 const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType()); 361 uint32_t BitWidth = IT->getBitWidth(); 362 APInt KnownZero(BitWidth, 0); 363 APInt KnownOne(BitWidth, 0); 364 ComputeMaskedBits(II->getOperand(1), APInt::getAllOnesValue(BitWidth), 365 KnownZero, KnownOne); 366 unsigned LeadingZeros = KnownOne.countLeadingZeros(); 367 APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros)); 368 if ((Mask & KnownZero) == Mask) 369 return ReplaceInstUsesWith(CI, ConstantInt::get(IT, 370 APInt(BitWidth, LeadingZeros))); 371 372 } 373 break; 374 case Intrinsic::uadd_with_overflow: { 375 Value *LHS = II->getOperand(1), *RHS = II->getOperand(2); 376 const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType()); 377 uint32_t BitWidth = IT->getBitWidth(); 378 APInt Mask = APInt::getSignBit(BitWidth); 379 APInt LHSKnownZero(BitWidth, 0); 380 APInt LHSKnownOne(BitWidth, 0); 381 ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne); 382 bool LHSKnownNegative = LHSKnownOne[BitWidth - 1]; 383 bool LHSKnownPositive = LHSKnownZero[BitWidth - 1]; 384 385 if (LHSKnownNegative || LHSKnownPositive) { 386 APInt RHSKnownZero(BitWidth, 0); 387 APInt RHSKnownOne(BitWidth, 0); 388 ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne); 389 bool RHSKnownNegative = RHSKnownOne[BitWidth - 1]; 390 bool RHSKnownPositive = RHSKnownZero[BitWidth - 1]; 391 if (LHSKnownNegative && RHSKnownNegative) { 392 // The sign bit is set in both cases: this MUST overflow. 393 // Create a simple add instruction, and insert it into the struct. 394 Instruction *Add = BinaryOperator::CreateAdd(LHS, RHS, "", &CI); 395 Worklist.Add(Add); 396 Constant *V[] = { 397 UndefValue::get(LHS->getType()),ConstantInt::getTrue(II->getContext()) 398 }; 399 Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false); 400 return InsertValueInst::Create(Struct, Add, 0); 401 } 402 403 if (LHSKnownPositive && RHSKnownPositive) { 404 // The sign bit is clear in both cases: this CANNOT overflow. 405 // Create a simple add instruction, and insert it into the struct. 406 Instruction *Add = BinaryOperator::CreateNUWAdd(LHS, RHS, "", &CI); 407 Worklist.Add(Add); 408 Constant *V[] = { 409 UndefValue::get(LHS->getType()), 410 ConstantInt::getFalse(II->getContext()) 411 }; 412 Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false); 413 return InsertValueInst::Create(Struct, Add, 0); 414 } 415 } 416 } 417 // FALL THROUGH uadd into sadd 418 case Intrinsic::sadd_with_overflow: 419 // Canonicalize constants into the RHS. 420 if (isa<Constant>(II->getOperand(1)) && 421 !isa<Constant>(II->getOperand(2))) { 422 Value *LHS = II->getOperand(1); 423 II->setOperand(1, II->getOperand(2)); 424 II->setOperand(2, LHS); 425 return II; 426 } 427 428 // X + undef -> undef 429 if (isa<UndefValue>(II->getOperand(2))) 430 return ReplaceInstUsesWith(CI, UndefValue::get(II->getType())); 431 432 if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getOperand(2))) { 433 // X + 0 -> {X, false} 434 if (RHS->isZero()) { 435 Constant *V[] = { 436 UndefValue::get(II->getOperand(0)->getType()), 437 ConstantInt::getFalse(II->getContext()) 438 }; 439 Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false); 440 return InsertValueInst::Create(Struct, II->getOperand(1), 0); 441 } 442 } 443 break; 444 case Intrinsic::usub_with_overflow: 445 case Intrinsic::ssub_with_overflow: 446 // undef - X -> undef 447 // X - undef -> undef 448 if (isa<UndefValue>(II->getOperand(1)) || 449 isa<UndefValue>(II->getOperand(2))) 450 return ReplaceInstUsesWith(CI, UndefValue::get(II->getType())); 451 452 if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getOperand(2))) { 453 // X - 0 -> {X, false} 454 if (RHS->isZero()) { 455 Constant *V[] = { 456 UndefValue::get(II->getOperand(1)->getType()), 457 ConstantInt::getFalse(II->getContext()) 458 }; 459 Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false); 460 return InsertValueInst::Create(Struct, II->getOperand(1), 0); 461 } 462 } 463 break; 464 case Intrinsic::umul_with_overflow: 465 case Intrinsic::smul_with_overflow: 466 // Canonicalize constants into the RHS. 467 if (isa<Constant>(II->getOperand(1)) && 468 !isa<Constant>(II->getOperand(2))) { 469 Value *LHS = II->getOperand(1); 470 II->setOperand(1, II->getOperand(2)); 471 II->setOperand(2, LHS); 472 return II; 473 } 474 475 // X * undef -> undef 476 if (isa<UndefValue>(II->getOperand(2))) 477 return ReplaceInstUsesWith(CI, UndefValue::get(II->getType())); 478 479 if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getOperand(2))) { 480 // X*0 -> {0, false} 481 if (RHSI->isZero()) 482 return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType())); 483 484 // X * 1 -> {X, false} 485 if (RHSI->equalsInt(1)) { 486 Constant *V[] = { 487 UndefValue::get(II->getOperand(1)->getType()), 488 ConstantInt::getFalse(II->getContext()) 489 }; 490 Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false); 491 return InsertValueInst::Create(Struct, II->getOperand(1), 0); 492 } 493 } 494 break; 495 case Intrinsic::ppc_altivec_lvx: 496 case Intrinsic::ppc_altivec_lvxl: 497 case Intrinsic::x86_sse_loadu_ps: 498 case Intrinsic::x86_sse2_loadu_pd: 499 case Intrinsic::x86_sse2_loadu_dq: 500 // Turn PPC lvx -> load if the pointer is known aligned. 501 // Turn X86 loadups -> load if the pointer is known aligned. 502 if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) { 503 Value *Ptr = Builder->CreateBitCast(II->getOperand(1), 504 PointerType::getUnqual(II->getType())); 505 return new LoadInst(Ptr); 506 } 507 break; 508 case Intrinsic::ppc_altivec_stvx: 509 case Intrinsic::ppc_altivec_stvxl: 510 // Turn stvx -> store if the pointer is known aligned. 511 if (GetOrEnforceKnownAlignment(II->getOperand(2), 16) >= 16) { 512 const Type *OpPtrTy = 513 PointerType::getUnqual(II->getOperand(1)->getType()); 514 Value *Ptr = Builder->CreateBitCast(II->getOperand(2), OpPtrTy); 515 return new StoreInst(II->getOperand(1), Ptr); 516 } 517 break; 518 case Intrinsic::x86_sse_storeu_ps: 519 case Intrinsic::x86_sse2_storeu_pd: 520 case Intrinsic::x86_sse2_storeu_dq: 521 // Turn X86 storeu -> store if the pointer is known aligned. 522 if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) { 523 const Type *OpPtrTy = 524 PointerType::getUnqual(II->getOperand(2)->getType()); 525 Value *Ptr = Builder->CreateBitCast(II->getOperand(1), OpPtrTy); 526 return new StoreInst(II->getOperand(2), Ptr); 527 } 528 break; 529 530 case Intrinsic::x86_sse_cvttss2si: { 531 // These intrinsics only demands the 0th element of its input vector. If 532 // we can simplify the input based on that, do so now. 533 unsigned VWidth = 534 cast<VectorType>(II->getOperand(1)->getType())->getNumElements(); 535 APInt DemandedElts(VWidth, 1); 536 APInt UndefElts(VWidth, 0); 537 if (Value *V = SimplifyDemandedVectorElts(II->getOperand(1), DemandedElts, 538 UndefElts)) { 539 II->setOperand(1, V); 540 return II; 541 } 542 break; 543 } 544 545 case Intrinsic::ppc_altivec_vperm: 546 // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant. 547 if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getOperand(3))) { 548 assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!"); 549 550 // Check that all of the elements are integer constants or undefs. 551 bool AllEltsOk = true; 552 for (unsigned i = 0; i != 16; ++i) { 553 if (!isa<ConstantInt>(Mask->getOperand(i)) && 554 !isa<UndefValue>(Mask->getOperand(i))) { 555 AllEltsOk = false; 556 break; 557 } 558 } 559 560 if (AllEltsOk) { 561 // Cast the input vectors to byte vectors. 562 Value *Op0 = Builder->CreateBitCast(II->getOperand(1), Mask->getType()); 563 Value *Op1 = Builder->CreateBitCast(II->getOperand(2), Mask->getType()); 564 Value *Result = UndefValue::get(Op0->getType()); 565 566 // Only extract each element once. 567 Value *ExtractedElts[32]; 568 memset(ExtractedElts, 0, sizeof(ExtractedElts)); 569 570 for (unsigned i = 0; i != 16; ++i) { 571 if (isa<UndefValue>(Mask->getOperand(i))) 572 continue; 573 unsigned Idx=cast<ConstantInt>(Mask->getOperand(i))->getZExtValue(); 574 Idx &= 31; // Match the hardware behavior. 575 576 if (ExtractedElts[Idx] == 0) { 577 ExtractedElts[Idx] = 578 Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1, 579 ConstantInt::get(Type::getInt32Ty(II->getContext()), 580 Idx&15, false), "tmp"); 581 } 582 583 // Insert this value into the result vector. 584 Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx], 585 ConstantInt::get(Type::getInt32Ty(II->getContext()), 586 i, false), "tmp"); 587 } 588 return CastInst::Create(Instruction::BitCast, Result, CI.getType()); 589 } 590 } 591 break; 592 593 case Intrinsic::stackrestore: { 594 // If the save is right next to the restore, remove the restore. This can 595 // happen when variable allocas are DCE'd. 596 if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getOperand(1))) { 597 if (SS->getIntrinsicID() == Intrinsic::stacksave) { 598 BasicBlock::iterator BI = SS; 599 if (&*++BI == II) 600 return EraseInstFromFunction(CI); 601 } 602 } 603 604 // Scan down this block to see if there is another stack restore in the 605 // same block without an intervening call/alloca. 606 BasicBlock::iterator BI = II; 607 TerminatorInst *TI = II->getParent()->getTerminator(); 608 bool CannotRemove = false; 609 for (++BI; &*BI != TI; ++BI) { 610 if (isa<AllocaInst>(BI) || isMalloc(BI)) { 611 CannotRemove = true; 612 break; 613 } 614 if (CallInst *BCI = dyn_cast<CallInst>(BI)) { 615 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) { 616 // If there is a stackrestore below this one, remove this one. 617 if (II->getIntrinsicID() == Intrinsic::stackrestore) 618 return EraseInstFromFunction(CI); 619 // Otherwise, ignore the intrinsic. 620 } else { 621 // If we found a non-intrinsic call, we can't remove the stack 622 // restore. 623 CannotRemove = true; 624 break; 625 } 626 } 627 } 628 629 // If the stack restore is in a return/unwind block and if there are no 630 // allocas or calls between the restore and the return, nuke the restore. 631 if (!CannotRemove && (isa<ReturnInst>(TI) || isa<UnwindInst>(TI))) 632 return EraseInstFromFunction(CI); 633 break; 634 } 635 case Intrinsic::objectsize: { 636 const Type *ReturnTy = CI.getType(); 637 Value *Op1 = II->getOperand(1); 638 639 // If we're a constant expr then we just return the number of bytes 640 // left in whatever we're indexing. Since it's constant there's no 641 // need for maximum or minimum bytes. 642 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op1)) { 643 // If this isn't a GEP give up. 644 if (CE->getOpcode() != Instruction::GetElementPtr) return 0; 645 646 const PointerType *ObjTy = 647 reinterpret_cast<const PointerType*>(CE->getOperand(0)->getType()); 648 649 if (const ArrayType *AT = dyn_cast<ArrayType>(ObjTy->getElementType())) { 650 651 // Deal with multi-dimensional arrays 652 const ArrayType *SAT = AT; 653 while ((AT = dyn_cast<ArrayType>(AT->getElementType()))) 654 SAT = AT; 655 656 size_t numElems = SAT->getNumElements(); 657 // We return the remaining bytes, so grab the size of an element 658 // in bytes. 659 size_t sizeofElem = SAT->getElementType()->getPrimitiveSizeInBits() / 8; 660 661 ConstantInt *Const = 662 cast<ConstantInt>(CE->getOperand(CE->getNumOperands() - 1)); 663 size_t indx = Const->getZExtValue(); 664 return ReplaceInstUsesWith(CI, 665 ConstantInt::get(ReturnTy, 666 ((numElems - indx) * sizeofElem))); 667 } 668 } 669 670 // TODO: Add more Instruction types here. 671 672 } 673 } 674 675 return visitCallSite(II); 676} 677 678// InvokeInst simplification 679// 680Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) { 681 return visitCallSite(&II); 682} 683 684/// isSafeToEliminateVarargsCast - If this cast does not affect the value 685/// passed through the varargs area, we can eliminate the use of the cast. 686static bool isSafeToEliminateVarargsCast(const CallSite CS, 687 const CastInst * const CI, 688 const TargetData * const TD, 689 const int ix) { 690 if (!CI->isLosslessCast()) 691 return false; 692 693 // The size of ByVal arguments is derived from the type, so we 694 // can't change to a type with a different size. If the size were 695 // passed explicitly we could avoid this check. 696 if (!CS.paramHasAttr(ix, Attribute::ByVal)) 697 return true; 698 699 const Type* SrcTy = 700 cast<PointerType>(CI->getOperand(0)->getType())->getElementType(); 701 const Type* DstTy = cast<PointerType>(CI->getType())->getElementType(); 702 if (!SrcTy->isSized() || !DstTy->isSized()) 703 return false; 704 if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy)) 705 return false; 706 return true; 707} 708 709// visitCallSite - Improvements for call and invoke instructions. 710// 711Instruction *InstCombiner::visitCallSite(CallSite CS) { 712 bool Changed = false; 713 714 // If the callee is a constexpr cast of a function, attempt to move the cast 715 // to the arguments of the call/invoke. 716 if (transformConstExprCastCall(CS)) return 0; 717 718 Value *Callee = CS.getCalledValue(); 719 720 if (Function *CalleeF = dyn_cast<Function>(Callee)) 721 if (CalleeF->getCallingConv() != CS.getCallingConv()) { 722 Instruction *OldCall = CS.getInstruction(); 723 // If the call and callee calling conventions don't match, this call must 724 // be unreachable, as the call is undefined. 725 new StoreInst(ConstantInt::getTrue(Callee->getContext()), 726 UndefValue::get(Type::getInt1PtrTy(Callee->getContext())), 727 OldCall); 728 // If OldCall dues not return void then replaceAllUsesWith undef. 729 // This allows ValueHandlers and custom metadata to adjust itself. 730 if (!OldCall->getType()->isVoidTy()) 731 OldCall->replaceAllUsesWith(UndefValue::get(OldCall->getType())); 732 if (isa<CallInst>(OldCall)) // Not worth removing an invoke here. 733 return EraseInstFromFunction(*OldCall); 734 return 0; 735 } 736 737 if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) { 738 // This instruction is not reachable, just remove it. We insert a store to 739 // undef so that we know that this code is not reachable, despite the fact 740 // that we can't modify the CFG here. 741 new StoreInst(ConstantInt::getTrue(Callee->getContext()), 742 UndefValue::get(Type::getInt1PtrTy(Callee->getContext())), 743 CS.getInstruction()); 744 745 // If CS dues not return void then replaceAllUsesWith undef. 746 // This allows ValueHandlers and custom metadata to adjust itself. 747 if (!CS.getInstruction()->getType()->isVoidTy()) 748 CS.getInstruction()-> 749 replaceAllUsesWith(UndefValue::get(CS.getInstruction()->getType())); 750 751 if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) { 752 // Don't break the CFG, insert a dummy cond branch. 753 BranchInst::Create(II->getNormalDest(), II->getUnwindDest(), 754 ConstantInt::getTrue(Callee->getContext()), II); 755 } 756 return EraseInstFromFunction(*CS.getInstruction()); 757 } 758 759 if (BitCastInst *BC = dyn_cast<BitCastInst>(Callee)) 760 if (IntrinsicInst *In = dyn_cast<IntrinsicInst>(BC->getOperand(0))) 761 if (In->getIntrinsicID() == Intrinsic::init_trampoline) 762 return transformCallThroughTrampoline(CS); 763 764 const PointerType *PTy = cast<PointerType>(Callee->getType()); 765 const FunctionType *FTy = cast<FunctionType>(PTy->getElementType()); 766 if (FTy->isVarArg()) { 767 int ix = FTy->getNumParams() + (isa<InvokeInst>(Callee) ? 3 : 1); 768 // See if we can optimize any arguments passed through the varargs area of 769 // the call. 770 for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(), 771 E = CS.arg_end(); I != E; ++I, ++ix) { 772 CastInst *CI = dyn_cast<CastInst>(*I); 773 if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) { 774 *I = CI->getOperand(0); 775 Changed = true; 776 } 777 } 778 } 779 780 if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) { 781 // Inline asm calls cannot throw - mark them 'nounwind'. 782 CS.setDoesNotThrow(); 783 Changed = true; 784 } 785 786 return Changed ? CS.getInstruction() : 0; 787} 788 789// transformConstExprCastCall - If the callee is a constexpr cast of a function, 790// attempt to move the cast to the arguments of the call/invoke. 791// 792bool InstCombiner::transformConstExprCastCall(CallSite CS) { 793 if (!isa<ConstantExpr>(CS.getCalledValue())) return false; 794 ConstantExpr *CE = cast<ConstantExpr>(CS.getCalledValue()); 795 if (CE->getOpcode() != Instruction::BitCast || 796 !isa<Function>(CE->getOperand(0))) 797 return false; 798 Function *Callee = cast<Function>(CE->getOperand(0)); 799 Instruction *Caller = CS.getInstruction(); 800 const AttrListPtr &CallerPAL = CS.getAttributes(); 801 802 // Okay, this is a cast from a function to a different type. Unless doing so 803 // would cause a type conversion of one of our arguments, change this call to 804 // be a direct call with arguments casted to the appropriate types. 805 // 806 const FunctionType *FT = Callee->getFunctionType(); 807 const Type *OldRetTy = Caller->getType(); 808 const Type *NewRetTy = FT->getReturnType(); 809 810 if (isa<StructType>(NewRetTy)) 811 return false; // TODO: Handle multiple return values. 812 813 // Check to see if we are changing the return type... 814 if (OldRetTy != NewRetTy) { 815 if (Callee->isDeclaration() && 816 // Conversion is ok if changing from one pointer type to another or from 817 // a pointer to an integer of the same size. 818 !((isa<PointerType>(OldRetTy) || !TD || 819 OldRetTy == TD->getIntPtrType(Caller->getContext())) && 820 (isa<PointerType>(NewRetTy) || !TD || 821 NewRetTy == TD->getIntPtrType(Caller->getContext())))) 822 return false; // Cannot transform this return value. 823 824 if (!Caller->use_empty() && 825 // void -> non-void is handled specially 826 !NewRetTy->isVoidTy() && !CastInst::isCastable(NewRetTy, OldRetTy)) 827 return false; // Cannot transform this return value. 828 829 if (!CallerPAL.isEmpty() && !Caller->use_empty()) { 830 Attributes RAttrs = CallerPAL.getRetAttributes(); 831 if (RAttrs & Attribute::typeIncompatible(NewRetTy)) 832 return false; // Attribute not compatible with transformed value. 833 } 834 835 // If the callsite is an invoke instruction, and the return value is used by 836 // a PHI node in a successor, we cannot change the return type of the call 837 // because there is no place to put the cast instruction (without breaking 838 // the critical edge). Bail out in this case. 839 if (!Caller->use_empty()) 840 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) 841 for (Value::use_iterator UI = II->use_begin(), E = II->use_end(); 842 UI != E; ++UI) 843 if (PHINode *PN = dyn_cast<PHINode>(*UI)) 844 if (PN->getParent() == II->getNormalDest() || 845 PN->getParent() == II->getUnwindDest()) 846 return false; 847 } 848 849 unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin()); 850 unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs); 851 852 CallSite::arg_iterator AI = CS.arg_begin(); 853 for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) { 854 const Type *ParamTy = FT->getParamType(i); 855 const Type *ActTy = (*AI)->getType(); 856 857 if (!CastInst::isCastable(ActTy, ParamTy)) 858 return false; // Cannot transform this parameter value. 859 860 if (CallerPAL.getParamAttributes(i + 1) 861 & Attribute::typeIncompatible(ParamTy)) 862 return false; // Attribute not compatible with transformed value. 863 864 // Converting from one pointer type to another or between a pointer and an 865 // integer of the same size is safe even if we do not have a body. 866 bool isConvertible = ActTy == ParamTy || 867 (TD && ((isa<PointerType>(ParamTy) || 868 ParamTy == TD->getIntPtrType(Caller->getContext())) && 869 (isa<PointerType>(ActTy) || 870 ActTy == TD->getIntPtrType(Caller->getContext())))); 871 if (Callee->isDeclaration() && !isConvertible) return false; 872 } 873 874 if (FT->getNumParams() < NumActualArgs && !FT->isVarArg() && 875 Callee->isDeclaration()) 876 return false; // Do not delete arguments unless we have a function body. 877 878 if (FT->getNumParams() < NumActualArgs && FT->isVarArg() && 879 !CallerPAL.isEmpty()) 880 // In this case we have more arguments than the new function type, but we 881 // won't be dropping them. Check that these extra arguments have attributes 882 // that are compatible with being a vararg call argument. 883 for (unsigned i = CallerPAL.getNumSlots(); i; --i) { 884 if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams()) 885 break; 886 Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs; 887 if (PAttrs & Attribute::VarArgsIncompatible) 888 return false; 889 } 890 891 // Okay, we decided that this is a safe thing to do: go ahead and start 892 // inserting cast instructions as necessary... 893 std::vector<Value*> Args; 894 Args.reserve(NumActualArgs); 895 SmallVector<AttributeWithIndex, 8> attrVec; 896 attrVec.reserve(NumCommonArgs); 897 898 // Get any return attributes. 899 Attributes RAttrs = CallerPAL.getRetAttributes(); 900 901 // If the return value is not being used, the type may not be compatible 902 // with the existing attributes. Wipe out any problematic attributes. 903 RAttrs &= ~Attribute::typeIncompatible(NewRetTy); 904 905 // Add the new return attributes. 906 if (RAttrs) 907 attrVec.push_back(AttributeWithIndex::get(0, RAttrs)); 908 909 AI = CS.arg_begin(); 910 for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) { 911 const Type *ParamTy = FT->getParamType(i); 912 if ((*AI)->getType() == ParamTy) { 913 Args.push_back(*AI); 914 } else { 915 Instruction::CastOps opcode = CastInst::getCastOpcode(*AI, 916 false, ParamTy, false); 917 Args.push_back(Builder->CreateCast(opcode, *AI, ParamTy, "tmp")); 918 } 919 920 // Add any parameter attributes. 921 if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1)) 922 attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs)); 923 } 924 925 // If the function takes more arguments than the call was taking, add them 926 // now. 927 for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i) 928 Args.push_back(Constant::getNullValue(FT->getParamType(i))); 929 930 // If we are removing arguments to the function, emit an obnoxious warning. 931 if (FT->getNumParams() < NumActualArgs) { 932 if (!FT->isVarArg()) { 933 errs() << "WARNING: While resolving call to function '" 934 << Callee->getName() << "' arguments were dropped!\n"; 935 } else { 936 // Add all of the arguments in their promoted form to the arg list. 937 for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) { 938 const Type *PTy = getPromotedType((*AI)->getType()); 939 if (PTy != (*AI)->getType()) { 940 // Must promote to pass through va_arg area! 941 Instruction::CastOps opcode = 942 CastInst::getCastOpcode(*AI, false, PTy, false); 943 Args.push_back(Builder->CreateCast(opcode, *AI, PTy, "tmp")); 944 } else { 945 Args.push_back(*AI); 946 } 947 948 // Add any parameter attributes. 949 if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1)) 950 attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs)); 951 } 952 } 953 } 954 955 if (Attributes FnAttrs = CallerPAL.getFnAttributes()) 956 attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs)); 957 958 if (NewRetTy->isVoidTy()) 959 Caller->setName(""); // Void type should not have a name. 960 961 const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(), 962 attrVec.end()); 963 964 Instruction *NC; 965 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) { 966 NC = InvokeInst::Create(Callee, II->getNormalDest(), II->getUnwindDest(), 967 Args.begin(), Args.end(), 968 Caller->getName(), Caller); 969 cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv()); 970 cast<InvokeInst>(NC)->setAttributes(NewCallerPAL); 971 } else { 972 NC = CallInst::Create(Callee, Args.begin(), Args.end(), 973 Caller->getName(), Caller); 974 CallInst *CI = cast<CallInst>(Caller); 975 if (CI->isTailCall()) 976 cast<CallInst>(NC)->setTailCall(); 977 cast<CallInst>(NC)->setCallingConv(CI->getCallingConv()); 978 cast<CallInst>(NC)->setAttributes(NewCallerPAL); 979 } 980 981 // Insert a cast of the return type as necessary. 982 Value *NV = NC; 983 if (OldRetTy != NV->getType() && !Caller->use_empty()) { 984 if (!NV->getType()->isVoidTy()) { 985 Instruction::CastOps opcode = CastInst::getCastOpcode(NC, false, 986 OldRetTy, false); 987 NV = NC = CastInst::Create(opcode, NC, OldRetTy, "tmp"); 988 989 // If this is an invoke instruction, we should insert it after the first 990 // non-phi, instruction in the normal successor block. 991 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) { 992 BasicBlock::iterator I = II->getNormalDest()->getFirstNonPHI(); 993 InsertNewInstBefore(NC, *I); 994 } else { 995 // Otherwise, it's a call, just insert cast right after the call instr 996 InsertNewInstBefore(NC, *Caller); 997 } 998 Worklist.AddUsersToWorkList(*Caller); 999 } else { 1000 NV = UndefValue::get(Caller->getType()); 1001 } 1002 } 1003 1004 1005 if (!Caller->use_empty()) 1006 Caller->replaceAllUsesWith(NV); 1007 1008 EraseInstFromFunction(*Caller); 1009 return true; 1010} 1011 1012// transformCallThroughTrampoline - Turn a call to a function created by the 1013// init_trampoline intrinsic into a direct call to the underlying function. 1014// 1015Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) { 1016 Value *Callee = CS.getCalledValue(); 1017 const PointerType *PTy = cast<PointerType>(Callee->getType()); 1018 const FunctionType *FTy = cast<FunctionType>(PTy->getElementType()); 1019 const AttrListPtr &Attrs = CS.getAttributes(); 1020 1021 // If the call already has the 'nest' attribute somewhere then give up - 1022 // otherwise 'nest' would occur twice after splicing in the chain. 1023 if (Attrs.hasAttrSomewhere(Attribute::Nest)) 1024 return 0; 1025 1026 IntrinsicInst *Tramp = 1027 cast<IntrinsicInst>(cast<BitCastInst>(Callee)->getOperand(0)); 1028 1029 Function *NestF = cast<Function>(Tramp->getOperand(2)->stripPointerCasts()); 1030 const PointerType *NestFPTy = cast<PointerType>(NestF->getType()); 1031 const FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType()); 1032 1033 const AttrListPtr &NestAttrs = NestF->getAttributes(); 1034 if (!NestAttrs.isEmpty()) { 1035 unsigned NestIdx = 1; 1036 const Type *NestTy = 0; 1037 Attributes NestAttr = Attribute::None; 1038 1039 // Look for a parameter marked with the 'nest' attribute. 1040 for (FunctionType::param_iterator I = NestFTy->param_begin(), 1041 E = NestFTy->param_end(); I != E; ++NestIdx, ++I) 1042 if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) { 1043 // Record the parameter type and any other attributes. 1044 NestTy = *I; 1045 NestAttr = NestAttrs.getParamAttributes(NestIdx); 1046 break; 1047 } 1048 1049 if (NestTy) { 1050 Instruction *Caller = CS.getInstruction(); 1051 std::vector<Value*> NewArgs; 1052 NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1); 1053 1054 SmallVector<AttributeWithIndex, 8> NewAttrs; 1055 NewAttrs.reserve(Attrs.getNumSlots() + 1); 1056 1057 // Insert the nest argument into the call argument list, which may 1058 // mean appending it. Likewise for attributes. 1059 1060 // Add any result attributes. 1061 if (Attributes Attr = Attrs.getRetAttributes()) 1062 NewAttrs.push_back(AttributeWithIndex::get(0, Attr)); 1063 1064 { 1065 unsigned Idx = 1; 1066 CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end(); 1067 do { 1068 if (Idx == NestIdx) { 1069 // Add the chain argument and attributes. 1070 Value *NestVal = Tramp->getOperand(3); 1071 if (NestVal->getType() != NestTy) 1072 NestVal = new BitCastInst(NestVal, NestTy, "nest", Caller); 1073 NewArgs.push_back(NestVal); 1074 NewAttrs.push_back(AttributeWithIndex::get(NestIdx, NestAttr)); 1075 } 1076 1077 if (I == E) 1078 break; 1079 1080 // Add the original argument and attributes. 1081 NewArgs.push_back(*I); 1082 if (Attributes Attr = Attrs.getParamAttributes(Idx)) 1083 NewAttrs.push_back 1084 (AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr)); 1085 1086 ++Idx, ++I; 1087 } while (1); 1088 } 1089 1090 // Add any function attributes. 1091 if (Attributes Attr = Attrs.getFnAttributes()) 1092 NewAttrs.push_back(AttributeWithIndex::get(~0, Attr)); 1093 1094 // The trampoline may have been bitcast to a bogus type (FTy). 1095 // Handle this by synthesizing a new function type, equal to FTy 1096 // with the chain parameter inserted. 1097 1098 std::vector<const Type*> NewTypes; 1099 NewTypes.reserve(FTy->getNumParams()+1); 1100 1101 // Insert the chain's type into the list of parameter types, which may 1102 // mean appending it. 1103 { 1104 unsigned Idx = 1; 1105 FunctionType::param_iterator I = FTy->param_begin(), 1106 E = FTy->param_end(); 1107 1108 do { 1109 if (Idx == NestIdx) 1110 // Add the chain's type. 1111 NewTypes.push_back(NestTy); 1112 1113 if (I == E) 1114 break; 1115 1116 // Add the original type. 1117 NewTypes.push_back(*I); 1118 1119 ++Idx, ++I; 1120 } while (1); 1121 } 1122 1123 // Replace the trampoline call with a direct call. Let the generic 1124 // code sort out any function type mismatches. 1125 FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes, 1126 FTy->isVarArg()); 1127 Constant *NewCallee = 1128 NestF->getType() == PointerType::getUnqual(NewFTy) ? 1129 NestF : ConstantExpr::getBitCast(NestF, 1130 PointerType::getUnqual(NewFTy)); 1131 const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(), 1132 NewAttrs.end()); 1133 1134 Instruction *NewCaller; 1135 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) { 1136 NewCaller = InvokeInst::Create(NewCallee, 1137 II->getNormalDest(), II->getUnwindDest(), 1138 NewArgs.begin(), NewArgs.end(), 1139 Caller->getName(), Caller); 1140 cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv()); 1141 cast<InvokeInst>(NewCaller)->setAttributes(NewPAL); 1142 } else { 1143 NewCaller = CallInst::Create(NewCallee, NewArgs.begin(), NewArgs.end(), 1144 Caller->getName(), Caller); 1145 if (cast<CallInst>(Caller)->isTailCall()) 1146 cast<CallInst>(NewCaller)->setTailCall(); 1147 cast<CallInst>(NewCaller)-> 1148 setCallingConv(cast<CallInst>(Caller)->getCallingConv()); 1149 cast<CallInst>(NewCaller)->setAttributes(NewPAL); 1150 } 1151 if (!Caller->getType()->isVoidTy()) 1152 Caller->replaceAllUsesWith(NewCaller); 1153 Caller->eraseFromParent(); 1154 Worklist.Remove(Caller); 1155 return 0; 1156 } 1157 } 1158 1159 // Replace the trampoline call with a direct call. Since there is no 'nest' 1160 // parameter, there is no need to adjust the argument list. Let the generic 1161 // code sort out any function type mismatches. 1162 Constant *NewCallee = 1163 NestF->getType() == PTy ? NestF : 1164 ConstantExpr::getBitCast(NestF, PTy); 1165 CS.setCalledFunction(NewCallee); 1166 return CS.getInstruction(); 1167} 1168 1169