InstCombineCalls.cpp revision a90c5c7605e0d8df96e61d4bf0cc47a7eaea92cf
//===- InstCombineCalls.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall and visitInvoke functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
using namespace llvm;

/// getPromotedType - Return the specified type promoted as it would be to pass
/// through a va_arg area.
static const Type *getPromotedType(const Type *Ty) {
  if (const IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}

/// EnforceKnownAlignment - If the specified pointer points to an object that
/// we control, modify the object's alignment to PrefAlign. This isn't
/// often possible though. If alignment is important, a more reliable approach
/// is to simply align all global variables and allocation instructions to
/// their preferred alignment from the beginning.
///
static unsigned EnforceKnownAlignment(Value *V,
                                      unsigned Align, unsigned PrefAlign) {

  User *U = dyn_cast<User>(V);
  if (!U) return Align;

  switch (Operator::getOpcode(U)) {
  default: break;
  case Instruction::BitCast:
    return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
  case Instruction::GetElementPtr: {
    // If all indexes are zero, it is just the alignment of the base pointer.
    bool AllZeroOperands = true;
    for (User::op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e; ++i)
      if (!isa<Constant>(*i) ||
          !cast<Constant>(*i)->isNullValue()) {
        AllZeroOperands = false;
        break;
      }

    if (AllZeroOperands) {
      // Treat this like a bitcast.
      return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
    }
    return Align;
  }
  case Instruction::Alloca: {
    AllocaInst *AI = cast<AllocaInst>(V);
    // If the preferred alignment is greater than the alloca's current
    // alignment, round the alloca's alignment up.
    if (AI->getAlignment() >= PrefAlign)
      return AI->getAlignment();
    AI->setAlignment(PrefAlign);
    return PrefAlign;
  }
  }

  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // If there is a large requested alignment and we can, bump up the
    // alignment of the global.
    if (GV->isDeclaration()) return Align;

    if (GV->getAlignment() >= PrefAlign)
      return GV->getAlignment();
    // We can only increase the alignment of the global if it has no alignment
    // specified or if it is not assigned a section. If it is assigned a
    // section, the global could be densely packed with other objects in the
    // section, and increasing the alignment could cause padding issues.
    if (!GV->hasSection() || GV->getAlignment() == 0)
      GV->setAlignment(PrefAlign);
    return GV->getAlignment();
  }

  return Align;
}
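
// For example (illustrative, not taken from this file): given
//   %a = alloca i32, align 4
//   %p = bitcast i32* %a to i8*
// a request for PrefAlign = 16 on %p walks through the bitcast to the alloca
// and rewrites it to "alloca i32, align 16", so a subsequent alignment query
// can report 16.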

/// GetOrEnforceKnownAlignment - If the specified pointer has an alignment that
/// we can determine, return it, otherwise return 0. If PrefAlign is specified,
/// and it is more than the alignment of the ultimate object, see if we can
/// increase the alignment of the ultimate object, making this check succeed.
unsigned InstCombiner::GetOrEnforceKnownAlignment(Value *V,
                                                  unsigned PrefAlign) {
  unsigned BitWidth = TD ? TD->getTypeSizeInBits(V->getType()) :
                           sizeof(PrefAlign) * CHAR_BIT;
  APInt Mask = APInt::getAllOnesValue(BitWidth);
  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  ComputeMaskedBits(V, Mask, KnownZero, KnownOne);
  unsigned TrailZ = KnownZero.countTrailingOnes();
  unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);

  if (PrefAlign > Align)
    Align = EnforceKnownAlignment(V, Align, PrefAlign);

  // Return the (possibly enforced) alignment.
  return Align;
}

Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
  unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getArgOperand(0));
  unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getArgOperand(1));
  unsigned MinAlign = std::min(DstAlign, SrcAlign);
  unsigned CopyAlign = MI->getAlignment();

  if (CopyAlign < MinAlign) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      MinAlign, false));
    return MI;
  }

  // If the length of the memcpy is 1/2/4/8 bytes, replace the memcpy with a
  // load/store pair.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getArgOperand(2));
  if (MemOpLength == 0) return 0;

  // Source and destination pointer types are always "i8*" for the intrinsic.
  // See if the size is something we can handle with a single primitive
  // load/store. A single load+store correctly handles overlapping memory in
  // the memmove case.
  unsigned Size = MemOpLength->getZExtValue();
  if (Size == 0) return MI;  // Delete this mem transfer.

  if (Size > 8 || (Size&(Size-1)))
    return 0;  // If not 1/2/4/8 bytes, exit.

  // Use an integer load+store unless we can find something better.
  unsigned SrcAddrSp =
    cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
    cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();

  const IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);

  // Memcpy forces the use of i8* for the source and destination. That means
  // that if you're using memcpy to move one double around, you'll get a cast
  // from double* to i8*. We'd much rather use a double load+store than an i64
  // load+store here, because this improves the odds that the source or dest
  // address will be promotable. See if we can find a better type than the
  // integer datatype.
  Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
  if (StrippedDest != MI->getArgOperand(0)) {
    const Type *SrcETy = cast<PointerType>(StrippedDest->getType())
                           ->getElementType();
    if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
      // The SrcETy might be something like {{{double}}} or [1 x double]. Rip
      // down through these levels if so.
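      // For example (illustrative): {{double}} -> {double} -> double, or
      // [1 x i64] -> i64; a multi-element struct or array stops the walk.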
      while (!SrcETy->isSingleValueType()) {
        if (const StructType *STy = dyn_cast<StructType>(SrcETy)) {
          if (STy->getNumElements() == 1)
            SrcETy = STy->getElementType(0);
          else
            break;
        } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
          if (ATy->getNumElements() == 1)
            SrcETy = ATy->getElementType();
          else
            break;
        } else
          break;
      }

      if (SrcETy->isSingleValueType()) {
        NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp);
        NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp);
      }
    }
  }


  // If the memcpy/memmove provides better alignment info than we can
  // infer, use it.
  SrcAlign = std::max(SrcAlign, CopyAlign);
  DstAlign = std::max(DstAlign, CopyAlign);

  Value *Src = Builder->CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
  Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
  Instruction *L = new LoadInst(Src, "tmp", MI->isVolatile(), SrcAlign);
  InsertNewInstBefore(L, *MI);
  InsertNewInstBefore(new StoreInst(L, Dest, MI->isVolatile(), DstAlign),
                      *MI);

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setArgOperand(2, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}

Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
  unsigned Alignment = GetOrEnforceKnownAlignment(MI->getDest());
  if (MI->getAlignment() < Alignment) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      Alignment, false));
    return MI;
  }

  // Extract the length, alignment, and fill value if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return 0;
  uint64_t Len = LenC->getZExtValue();
  Alignment = MI->getAlignment();

  // If the length is zero, this is a no-op.
  if (Len == 0) return MI;  // memset(d,c,0,a) -> noop

  // memset(s,c,n) -> store s, c  (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    const Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    Dest = Builder->CreateBitCast(Dest, PointerType::getUnqual(ITy));

    // Alignment 0 is identity for alignment 1 for memset, but not store.
    if (Alignment == 0) Alignment = 1;

    // Extract the fill value and store.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    InsertNewInstBefore(new StoreInst(ConstantInt::get(ITy, Fill),
                                      Dest, false, Alignment), *MI);

    // Set the size of the memset to 0, it will be deleted on the next
    // iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return 0;
}

/// visitCallInst - CallInst simplification. This mostly only handles folding
/// of intrinsic instructions. For normal calls, it allows visitCallSite to do
/// the heavy lifting.
///
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
  if (isFreeCall(&CI))
    return visitFree(CI);
  if (isMalloc(&CI))
    return visitMalloc(CI);

  // If the caller function is nounwind, mark the call as nounwind, even if
  // the callee isn't.
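  // (Illustrative: inside a function declared nounwind, a plain
  // "call void @f()" can itself be marked nounwind, since any unwind through
  // this frame would already be undefined behavior.)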
  if (CI.getParent()->getParent()->doesNotThrow() &&
      !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallSite(&CI);

  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue()) return EraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations. We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // If we have a memmove and the source operand is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getParent()->getParent()->getParent();
          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
          const Type *Tys[3] = { CI.getArgOperand(0)->getType(),
                                 CI.getArgOperand(1)->getType(),
                                 CI.getArgOperand(2)->getType() };
          CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys, 3));
          Changed = true;
        }
    }

    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return EraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (isa<MemTransferInst>(MI)) {
      if (Instruction *I = SimplifyMemTransfer(MI))
        return I;
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
      if (Instruction *I = SimplifyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }

  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::objectsize: {
    // We need target data for just about everything, so depend on it.
    if (!TD) break;

    const Type *ReturnTy = CI.getType();
    bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);

    // Get to the real allocated thing and offset as fast as possible.
    Value *Op1 = II->getArgOperand(0)->stripPointerCasts();

    // If we've stripped down to a single global variable whose size we know,
    // just return that size.
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1)) {
      if (GV->hasDefinitiveInitializer()) {
        Constant *C = GV->getInitializer();
        uint64_t GlobalSize = TD->getTypeAllocSize(C->getType());
        return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, GlobalSize));
      } else {
        // Can't determine size of the GV.
        Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
        return ReplaceInstUsesWith(CI, RetVal);
      }
    } else if (AllocaInst *AI = dyn_cast<AllocaInst>(Op1)) {
      // Get the alloca size.
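      // For example (illustrative): for "%buf = alloca [16 x i8]",
      // llvm.objectsize folds to 16; an array allocation such as
      // "alloca i32, i32 %n" with a non-constant %n is left alone.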
      if (AI->getAllocatedType()->isSized()) {
        uint64_t AllocaSize = TD->getTypeAllocSize(AI->getAllocatedType());
        if (AI->isArrayAllocation()) {
          const ConstantInt *C = dyn_cast<ConstantInt>(AI->getArraySize());
          if (!C) break;
          AllocaSize *= C->getZExtValue();
        }
        return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, AllocaSize));
      }
    } else if (CallInst *MI = extractMallocCall(Op1)) {
      const Type* MallocType = getMallocAllocatedType(MI);
      // Get the malloc size.
      if (MallocType && MallocType->isSized()) {
        if (Value *NElems = getMallocArraySize(MI, TD, true)) {
          if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
            return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy,
              (NElements->getZExtValue() * TD->getTypeAllocSize(MallocType))));
        }
      }
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op1)) {
      // Only handle constant GEPs here.
      if (CE->getOpcode() != Instruction::GetElementPtr) break;
      GEPOperator *GEP = cast<GEPOperator>(CE);

      // Make sure we're not a constant offset from an external
      // global.
      Value *Operand = GEP->getPointerOperand();
      Operand = Operand->stripPointerCasts();
      if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Operand))
        if (!GV->hasDefinitiveInitializer()) break;

      // Get what we're pointing to and its size.
      const PointerType *BaseType =
        cast<PointerType>(Operand->getType());
      uint64_t Size = TD->getTypeAllocSize(BaseType->getElementType());

      // Get the current byte offset into the thing. Use the original
      // operand in case we're looking through a bitcast.
      SmallVector<Value*, 8> Ops(CE->op_begin()+1, CE->op_end());
      const PointerType *OffsetType =
        cast<PointerType>(GEP->getPointerOperand()->getType());
      uint64_t Offset = TD->getIndexedOffset(OffsetType, &Ops[0], Ops.size());

      if (Size < Offset) {
        // Out-of-bounds reference? Negative index normalized to a large
        // index? Just return "I don't know".
        Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
        return ReplaceInstUsesWith(CI, RetVal);
      }

      Constant *RetVal = ConstantInt::get(ReturnTy, Size-Offset);
      return ReplaceInstUsesWith(CI, RetVal);
    }

    // Do not return "I don't know" here. Later optimization passes could
    // make it possible to evaluate objectsize to a constant.
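    // (For instance, inlining may later expose the underlying allocation,
    // letting a subsequent InstCombine run fold this call.)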
    break;
  }
  case Intrinsic::bswap:
    // bswap(bswap(x)) -> x
    if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getArgOperand(0)))
      if (Operand->getIntrinsicID() == Intrinsic::bswap)
        return ReplaceInstUsesWith(CI, Operand->getArgOperand(0));

    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
    if (TruncInst *TI = dyn_cast<TruncInst>(II->getArgOperand(0))) {
      if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(TI->getOperand(0)))
        if (Operand->getIntrinsicID() == Intrinsic::bswap) {
          unsigned C = Operand->getType()->getPrimitiveSizeInBits() -
                       TI->getType()->getPrimitiveSizeInBits();
          Value *CV = ConstantInt::get(Operand->getType(), C);
          Value *V = Builder->CreateLShr(Operand->getArgOperand(0), CV);
          return new TruncInst(V, TI->getType());
        }
    }

    break;
  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
      // powi(x, 1) -> x
      if (Power->isOne())
        return ReplaceInstUsesWith(CI, II->getArgOperand(0));
      // powi(x, -1) -> 1/x
      if (Power->isAllOnesValue())
        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
                                          II->getArgOperand(0));
    }
    break;
  case Intrinsic::cttz: {
    // If all bits below the first known one are known zero,
    // this value is constant.
    const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getArgOperand(0), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned TrailingZeros = KnownOne.countTrailingZeros();
    APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, TrailingZeros)));

  }
  break;
  case Intrinsic::ctlz: {
    // If all bits above the first known one are known zero,
    // this value is constant.
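    // For example (illustrative): on an i8 value known to match 0b000101??,
    // the first known-one bit is bit 4 and the three bits above it are known
    // zero, so ctlz folds to the constant 3.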
    const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getArgOperand(0), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned LeadingZeros = KnownOne.countLeadingZeros();
    APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, LeadingZeros)));

  }
  break;
  case Intrinsic::uadd_with_overflow: {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt Mask = APInt::getSignBit(BitWidth);
    APInt LHSKnownZero(BitWidth, 0);
    APInt LHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
    bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
    bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];

    if (LHSKnownNegative || LHSKnownPositive) {
      APInt RHSKnownZero(BitWidth, 0);
      APInt RHSKnownOne(BitWidth, 0);
      ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
      bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
      bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
      if (LHSKnownNegative && RHSKnownNegative) {
        // The sign bit is set in both cases: this MUST overflow.
        // Create a simple add instruction, and insert it into the struct.
        Instruction *Add = BinaryOperator::CreateAdd(LHS, RHS, "", &CI);
        Worklist.Add(Add);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getTrue(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, Add, 0);
      }

      if (LHSKnownPositive && RHSKnownPositive) {
        // The sign bit is clear in both cases: this CANNOT overflow.
        // Create a simple add instruction, and insert it into the struct.
        Instruction *Add = BinaryOperator::CreateNUWAdd(LHS, RHS, "", &CI);
        Worklist.Add(Add);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, Add, 0);
      }
    }
  }
  // FALL THROUGH uadd into sadd
  case Intrinsic::sadd_with_overflow:
    // Canonicalize constants into the RHS.
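    // For example (illustrative):
    //   %r = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 7, i32 %x)
    // becomes
    //   %r = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %x, i32 7)
    // so later matches only have to look for constants on the RHS.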
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X + undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X + 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getCalledValue()->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // undef - X -> undef
    // X - undef -> undef
    if (isa<UndefValue>(II->getArgOperand(0)) ||
        isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X - 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X * undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X*0 -> {0, false}
      if (RHSI->isZero())
        return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));

      // X * 1 -> {X, false}
      if (RHSI->equalsInt(1)) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::x86_sse_loadu_ps:
  case Intrinsic::x86_sse2_loadu_pd:
  case Intrinsic::x86_sse2_loadu_dq:
    // Turn PPC lvx -> load if the pointer is known aligned.
    // Turn X86 loadups -> load if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getArgOperand(0), 16) >= 16) {
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                          PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
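    // For example (illustrative): if %p is known 16-byte aligned,
    //   call void @llvm.ppc.altivec.stvx(<4 x i32> %v, i8* %p)
    // becomes a plain "store <4 x i32> %v" through a bitcast of %p.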
    if (GetOrEnforceKnownAlignment(II->getArgOperand(1), 16) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(0)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
      return new StoreInst(II->getArgOperand(0), Ptr);
    }
    break;
  case Intrinsic::x86_sse_storeu_ps:
  case Intrinsic::x86_sse2_storeu_pd:
  case Intrinsic::x86_sse2_storeu_dq:
    // Turn X86 storeu -> store if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getArgOperand(0), 16) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(1)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
      return new StoreInst(II->getArgOperand(1), Ptr);
    }
    break;

  case Intrinsic::x86_sse_cvttss2si: {
    // This intrinsic only demands the 0th element of its input vector. If
    // we can simplify the input based on that, do so now.
    unsigned VWidth =
      cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
    APInt DemandedElts(VWidth, 1);
    APInt UndefElts(VWidth, 0);
    if (Value *V = SimplifyDemandedVectorElts(II->getArgOperand(0),
                                              DemandedElts, UndefElts)) {
      II->setArgOperand(0, V);
      return II;
    }
    break;
  }

  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getArgOperand(2))) {
      assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        if (!isa<ConstantInt>(Mask->getOperand(i)) &&
            !isa<UndefValue>(Mask->getOperand(i))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0),
                                            Mask->getType());
        Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1),
                                            Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getOperand(i)))
            continue;
          unsigned Idx = cast<ConstantInt>(Mask->getOperand(i))->getZExtValue();
          Idx &= 31;  // Match the hardware behavior.

          if (ExtractedElts[Idx] == 0) {
            ExtractedElts[Idx] =
              Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
                ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                 Idx&15, false), "tmp");
          }

          // Insert this value into the result vector.
          Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
                     ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                      i, false), "tmp");
        }
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
      }
    }
    break;

  case Intrinsic::stackrestore: {
    // If the save is right next to the restore, remove the restore. This can
    // happen when variable allocas are DCE'd.
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        BasicBlock::iterator BI = SS;
        if (&*++BI == II)
          return EraseInstFromFunction(CI);
      }
    }

    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
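    // For example (illustrative): in
    //   call void @llvm.stackrestore(i8* %sp1)
    //   ...no calls or allocas...
    //   call void @llvm.stackrestore(i8* %sp2)
    // the first restore is redundant, because the second one clobbers its
    // effect.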
    BasicBlock::iterator BI = II;
    TerminatorInst *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      if (isa<AllocaInst>(BI) || isMalloc(BI)) {
        CannotRemove = true;
        break;
      }
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
          // If there is a stackrestore below this one, remove this one.
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return EraseInstFromFunction(CI);
          // Otherwise, ignore the intrinsic.
        } else {
          // If we found a non-intrinsic call, we can't remove the stack
          // restore.
          CannotRemove = true;
          break;
        }
      }
    }

    // If the stack restore is in a return/unwind block and if there are no
    // allocas or calls between the restore and the return, nuke the restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<UnwindInst>(TI)))
      return EraseInstFromFunction(CI);
    break;
  }
  }

  return visitCallSite(II);
}

// InvokeInst simplification
//
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}

/// isSafeToEliminateVarargsCast - If this cast does not affect the value
/// passed through the varargs area, we can eliminate the use of the cast.
static bool isSafeToEliminateVarargsCast(const CallSite CS,
                                         const CastInst * const CI,
                                         const TargetData * const TD,
                                         const int ix) {
  if (!CI->isLosslessCast())
    return false;

  // The size of ByVal arguments is derived from the type, so we
  // can't change to a type with a different size. If the size were
  // passed explicitly we could avoid this check.
  if (!CS.paramHasAttr(ix, Attribute::ByVal))
    return true;

  const Type* SrcTy =
    cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  const Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
    return false;
  return true;
}

namespace {
class InstCombineFortifiedLibCalls : public SimplifyFortifiedLibCalls {
  InstCombiner *IC;
protected:
  void replaceCall(Value *With) {
    NewInstruction = IC->ReplaceInstUsesWith(*CI, With);
  }
  bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp, bool isString) const {
    if (ConstantInt *SizeCI =
          dyn_cast<ConstantInt>(CI->getOperand(SizeCIOp))) {
      if (SizeCI->isAllOnesValue())
        return true;
      if (isString)
        return SizeCI->getZExtValue() >=
               GetStringLength(CI->getOperand(SizeArgOp));
      if (ConstantInt *Arg = dyn_cast<ConstantInt>(CI->getOperand(SizeArgOp)))
        return SizeCI->getZExtValue() >= Arg->getZExtValue();
    }
    return false;
  }
public:
  InstCombineFortifiedLibCalls(InstCombiner *IC) : IC(IC), NewInstruction(0) { }
  Instruction *NewInstruction;
};
} // end anonymous namespace

// Try to fold some different types of calls here.
// Currently we're only working with the checking functions: memcpy_chk,
// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
// strcat_chk and strncat_chk.
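// For example (illustrative): a call such as
//   __memcpy_chk(dst, src, 10, 64)
// where the known object size (64) is at least the copy length (10) can be
// folded to a plain memcpy(dst, src, 10); an object size of -1 ("unknown")
// is likewise foldable.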
Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const TargetData *TD) {
  if (CI->getCalledFunction() == 0) return 0;

  InstCombineFortifiedLibCalls Simplifier(this);
  Simplifier.fold(CI, TD);
  return Simplifier.NewInstruction;
}

// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
  bool Changed = false;

  // If the callee is a constexpr cast of a function, attempt to move the cast
  // to the arguments of the call/invoke.
  if (transformConstExprCastCall(CS)) return 0;

  Value *Callee = CS.getCalledValue();

  if (Function *CalleeF = dyn_cast<Function>(Callee))
    // If the call and callee calling conventions don't match, this call must
    // be unreachable, as the call is undefined.
    if (CalleeF->getCallingConv() != CS.getCallingConv() &&
        // Only do this for calls to a function with a body. A prototype may
        // not actually end up matching the implementation's calling conv for a
        // variety of reasons (e.g. it may be written in assembly).
        !CalleeF->isDeclaration()) {
      Instruction *OldCall = CS.getInstruction();
      new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                    UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                    OldCall);
      // If OldCall does not return void then replaceAllUsesWith undef.
      // This allows ValueHandlers and custom metadata to adjust themselves.
      if (!OldCall->getType()->isVoidTy())
        OldCall->replaceAllUsesWith(UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))
        return EraseInstFromFunction(*OldCall);

      // We cannot remove an invoke, because it would change the CFG; just
      // change the callee to a null pointer.
      cast<InvokeInst>(OldCall)->setCalledFunction(
        Constant::getNullValue(CalleeF->getType()));
      return 0;
    }

  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
    // This instruction is not reachable, just remove it. We insert a store to
    // undef so that we know that this code is not reachable, despite the fact
    // that we can't modify the CFG here.
    new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                  UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                  CS.getInstruction());

    // If CS does not return void then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust themselves.
    if (!CS.getInstruction()->getType()->isVoidTy())
      CS.getInstruction()->
        replaceAllUsesWith(UndefValue::get(CS.getInstruction()->getType()));

    if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      // Don't break the CFG, insert a dummy cond branch.
      BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
                         ConstantInt::getTrue(Callee->getContext()), II);
    }
    return EraseInstFromFunction(*CS.getInstruction());
  }

  if (BitCastInst *BC = dyn_cast<BitCastInst>(Callee))
    if (IntrinsicInst *In = dyn_cast<IntrinsicInst>(BC->getOperand(0)))
      if (In->getIntrinsicID() == Intrinsic::init_trampoline)
        return transformCallThroughTrampoline(CS);

  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams() + (isa<InvokeInst>(Callee) ? 3 : 1);
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
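    // For example (illustrative): in
    //   %c = bitcast i32* %p to i8*
    //   call i32 (i8*, ...)* @printf(i8* %fmt, i8* %c)
    // the lossless pointer-to-pointer cast does not change the bits passed
    // through the varargs area, so the call can take %p directly.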
    for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
           E = CS.arg_end(); I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }

  // Try to optimize the call if possible; we require TargetData for most of
  // this. None of these calls are seen as possibly dead, so go ahead and
  // delete the instruction now.
  if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
    Instruction *I = tryOptimizeCall(CI, TD);
    // If we changed something, return the result; otherwise fall through to
    // the check below.
    if (I) return EraseInstFromFunction(*I);
  }

  return Changed ? CS.getInstruction() : 0;
}

// transformConstExprCastCall - If the callee is a constexpr cast of a function,
// attempt to move the cast to the arguments of the call/invoke.
//
bool InstCombiner::transformConstExprCastCall(CallSite CS) {
  if (!isa<ConstantExpr>(CS.getCalledValue())) return false;
  ConstantExpr *CE = cast<ConstantExpr>(CS.getCalledValue());
  if (CE->getOpcode() != Instruction::BitCast ||
      !isa<Function>(CE->getOperand(0)))
    return false;
  Function *Callee = cast<Function>(CE->getOperand(0));
  Instruction *Caller = CS.getInstruction();
  const AttrListPtr &CallerPAL = CS.getAttributes();

  // Okay, this is a cast from a function to a different type. Unless doing so
  // would cause a type conversion of one of our arguments, change this call to
  // be a direct call with arguments cast to the appropriate types.
  //
  const FunctionType *FT = Callee->getFunctionType();
  const Type *OldRetTy = Caller->getType();
  const Type *NewRetTy = FT->getReturnType();

  if (NewRetTy->isStructTy())
    return false;  // TODO: Handle multiple return values.

  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {
    if (Callee->isDeclaration() &&
        // Conversion is ok if changing from one pointer type to another or
        // from a pointer to an integer of the same size.
        !((OldRetTy->isPointerTy() || !TD ||
           OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
          (NewRetTy->isPointerTy() || !TD ||
           NewRetTy == TD->getIntPtrType(Caller->getContext()))))
      return false;  // Cannot transform this return value.

    if (!Caller->use_empty() &&
        // void -> non-void is handled specially
        !NewRetTy->isVoidTy() && !CastInst::isCastable(NewRetTy, OldRetTy))
      return false;  // Cannot transform this return value.

    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
      Attributes RAttrs = CallerPAL.getRetAttributes();
      if (RAttrs & Attribute::typeIncompatible(NewRetTy))
        return false;  // Attribute not compatible with transformed value.
    }

    // If the callsite is an invoke instruction, and the return value is used
    // by a PHI node in a successor, we cannot change the return type of the
    // call because there is no place to put the cast instruction (without
    // breaking the critical edge). Bail out in this case.
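    // For example (illustrative): if the result of "invoke i8* @f() ..."
    // feeds a PHI node in its normal destination, a cast to the new return
    // type would have to be placed on the edge into that block, and there is
    // no block there to hold it.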
    if (!Caller->use_empty())
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
        for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
             UI != E; ++UI)
          if (PHINode *PN = dyn_cast<PHINode>(*UI))
            if (PN->getParent() == II->getNormalDest() ||
                PN->getParent() == II->getUnwindDest())
              return false;
  }

  unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  CallSite::arg_iterator AI = CS.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    const Type *ActTy = (*AI)->getType();

    if (!CastInst::isCastable(ActTy, ParamTy))
      return false;  // Cannot transform this parameter value.

    if (CallerPAL.getParamAttributes(i + 1)
        & Attribute::typeIncompatible(ParamTy))
      return false;  // Attribute not compatible with transformed value.

    // Converting from one pointer type to another or between a pointer and an
    // integer of the same size is safe even if we do not have a body.
    bool isConvertible = ActTy == ParamTy ||
      (TD && ((ParamTy->isPointerTy() ||
               ParamTy == TD->getIntPtrType(Caller->getContext())) &&
              (ActTy->isPointerTy() ||
               ActTy == TD->getIntPtrType(Caller->getContext()))));
    if (Callee->isDeclaration() && !isConvertible) return false;
  }

  if (FT->getNumParams() < NumActualArgs && !FT->isVarArg() &&
      Callee->isDeclaration())
    return false;  // Do not delete arguments unless we have a function body.

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty())
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them. Check that these extra arguments have attributes
    // that are compatible with being a vararg call argument.
    for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
      if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
        break;
      Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
      if (PAttrs & Attribute::VarArgsIncompatible)
        return false;
    }

  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary...
  std::vector<Value*> Args;
  Args.reserve(NumActualArgs);
  SmallVector<AttributeWithIndex, 8> attrVec;
  attrVec.reserve(NumCommonArgs);

  // Get any return attributes.
  Attributes RAttrs = CallerPAL.getRetAttributes();

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes. Wipe out any problematic attributes.
  RAttrs &= ~Attribute::typeIncompatible(NewRetTy);

  // Add the new return attributes.
  if (RAttrs)
    attrVec.push_back(AttributeWithIndex::get(0, RAttrs));

  AI = CS.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    if ((*AI)->getType() == ParamTy) {
      Args.push_back(*AI);
    } else {
      Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
          false, ParamTy, false);
      Args.push_back(Builder->CreateCast(opcode, *AI, ParamTy, "tmp"));
    }

    // Add any parameter attributes.
    if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
      attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
  }

  // If the function takes more arguments than the call was taking, add them
  // now.
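  // For example (illustrative): calling "void @f(i32, i32)" through a
  // "void (i32)*" bitcast leaves the second parameter unsupplied; it is
  // filled in with the null value of its type (i32 0) below.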
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));

  // If we are removing arguments to the function, emit an obnoxious warning.
  if (FT->getNumParams() < NumActualArgs) {
    if (!FT->isVarArg()) {
      errs() << "WARNING: While resolving call to function '"
             << Callee->getName() << "' arguments were dropped!\n";
    } else {
      // Add all of the arguments in their promoted form to the arg list.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        const Type *PTy = getPromotedType((*AI)->getType());
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through va_arg area!
          Instruction::CastOps opcode =
            CastInst::getCastOpcode(*AI, false, PTy, false);
          Args.push_back(Builder->CreateCast(opcode, *AI, PTy, "tmp"));
        } else {
          Args.push_back(*AI);
        }

        // Add any parameter attributes.
        if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
          attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
      }
    }
  }

  if (Attributes FnAttrs = CallerPAL.getFnAttributes())
    attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));

  if (NewRetTy->isVoidTy())
    Caller->setName("");  // A void-typed value should not have a name.

  const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),
                                                     attrVec.end());

  Instruction *NC;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NC = InvokeInst::Create(Callee, II->getNormalDest(), II->getUnwindDest(),
                            Args.begin(), Args.end(),
                            Caller->getName(), Caller);
    cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
    cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
  } else {
    NC = CallInst::Create(Callee, Args.begin(), Args.end(),
                          Caller->getName(), Caller);
    CallInst *CI = cast<CallInst>(Caller);
    if (CI->isTailCall())
      cast<CallInst>(NC)->setTailCall();
    cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
    cast<CallInst>(NC)->setAttributes(NewCallerPAL);
  }

  // Insert a cast of the return type as necessary.
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      Instruction::CastOps opcode = CastInst::getCastOpcode(NC, false,
                                                            OldRetTy, false);
      NV = NC = CastInst::Create(opcode, NC, OldRetTy, "tmp");

      // If this is an invoke instruction, we should insert it after the first
      // non-PHI instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstNonPHI();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call; just insert the cast right after the call
        // instruction.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.AddUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }


  if (!Caller->use_empty())
    Caller->replaceAllUsesWith(NV);

  EraseInstFromFunction(*Caller);
  return true;
}

// transformCallThroughTrampoline - Turn a call to a function created by the
// init_trampoline intrinsic into a direct call to the underlying function.
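// For example (illustrative): given
//   %p = call i8* @llvm.init.trampoline(i8* %tramp,
//                                       i8* bitcast (...* @f to i8*),
//                                       i8* %nval)
// a call through (a bitcast of) %p becomes a direct call to @f, with %nval
// spliced in as the parameter marked 'nest'.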
//
Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
  Value *Callee = CS.getCalledValue();
  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  const AttrListPtr &Attrs = CS.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return 0;

  IntrinsicInst *Tramp =
    cast<IntrinsicInst>(cast<BitCastInst>(Callee)->getOperand(0));

  Function *NestF =
    cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
  const PointerType *NestFPTy = cast<PointerType>(NestF->getType());
  const FunctionType *NestFTy =
    cast<FunctionType>(NestFPTy->getElementType());

  const AttrListPtr &NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestIdx = 1;
    const Type *NestTy = 0;
    Attributes NestAttr = Attribute::None;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
           E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
      if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = NestAttrs.getParamAttributes(NestIdx);
        break;
      }

    if (NestTy) {
      Instruction *Caller = CS.getInstruction();
      std::vector<Value*> NewArgs;
      NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);

      SmallVector<AttributeWithIndex, 8> NewAttrs;
      NewAttrs.reserve(Attrs.getNumSlots() + 1);

      // Insert the nest argument into the call argument list, which may
      // mean appending it. Likewise for attributes.

      // Add any result attributes.
      if (Attributes Attr = Attrs.getRetAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(0, Attr));

      {
        unsigned Idx = 1;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        do {
          if (Idx == NestIdx) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp->getArgOperand(2);
            if (NestVal->getType() != NestTy)
              NestVal = new BitCastInst(NestVal, NestTy, "nest", Caller);
            NewArgs.push_back(NestVal);
            NewAttrs.push_back(AttributeWithIndex::get(NestIdx, NestAttr));
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          if (Attributes Attr = Attrs.getParamAttributes(Idx))
            NewAttrs.push_back
              (AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr));

          ++Idx, ++I;
        } while (1);
      }

      // Add any function attributes.
      if (Attributes Attr = Attrs.getFnAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(~0, Attr));

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<const Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned Idx = 1;
        FunctionType::param_iterator I = FTy->param_begin(),
                                     E = FTy->param_end();

        do {
          if (Idx == NestIdx)
            // Add the chain's type.
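            // (Illustrative: void (i32) with the chain at index 1 becomes
            // void (i8*, i32).)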
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++Idx, ++I;
        } while (1);
      }

      // Replace the trampoline call with a direct call. Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
        NestF->getType() == PointerType::getUnqual(NewFTy) ?
        NestF : ConstantExpr::getBitCast(NestF,
                                         PointerType::getUnqual(NewFTy));
      const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),
                                                   NewAttrs.end());

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        NewCaller = InvokeInst::Create(NewCallee,
                                       II->getNormalDest(),
                                       II->getUnwindDest(),
                                       NewArgs.begin(), NewArgs.end(),
                                       Caller->getName(), Caller);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewCallee, NewArgs.begin(), NewArgs.end(),
                                     Caller->getName(), Caller);
        if (cast<CallInst>(Caller)->isTailCall())
          cast<CallInst>(NewCaller)->setTailCall();
        cast<CallInst>(NewCaller)->
          setCallingConv(cast<CallInst>(Caller)->getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }
      if (!Caller->getType()->isVoidTy())
        Caller->replaceAllUsesWith(NewCaller);
      Caller->eraseFromParent();
      Worklist.Remove(Caller);
      return 0;
    }
  }

  // Replace the trampoline call with a direct call. Since there is no 'nest'
  // parameter, there is no need to adjust the argument list. Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee =
    NestF->getType() == PTy ? NestF :
    ConstantExpr::getBitCast(NestF, PTy);
  CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}