InstCombineCalls.cpp revision e16829b401409b398c9de9847c1d12eb931f7d63
//===- InstCombineCalls.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall and visitInvoke functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
using namespace llvm;

/// getPromotedType - Return the specified type promoted as it would be to pass
/// through a va_arg area.
static const Type *getPromotedType(const Type *Ty) {
  if (const IntegerType *ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}

/// EnforceKnownAlignment - If the specified pointer points to an object that
/// we control, modify the object's alignment to PrefAlign.  This isn't often
/// possible though.  If alignment is important, a more reliable approach is
/// to simply align all global variables and allocation instructions to their
/// preferred alignment from the beginning.
///
static unsigned EnforceKnownAlignment(Value *V,
                                      unsigned Align, unsigned PrefAlign) {

  User *U = dyn_cast<User>(V);
  if (!U) return Align;

  switch (Operator::getOpcode(U)) {
  default: break;
  case Instruction::BitCast:
    return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
  case Instruction::GetElementPtr: {
    // If all indexes are zero, it is just the alignment of the base pointer.
    bool AllZeroOperands = true;
    for (User::op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e; ++i)
      if (!isa<Constant>(*i) ||
          !cast<Constant>(*i)->isNullValue()) {
        AllZeroOperands = false;
        break;
      }

    if (AllZeroOperands) {
      // Treat this like a bitcast.
      return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
    }
    return Align;
  }
  case Instruction::Alloca: {
    AllocaInst *AI = cast<AllocaInst>(V);
    // If there is a requested alignment and if this is an alloca, round up.
    if (AI->getAlignment() >= PrefAlign)
      return AI->getAlignment();
    AI->setAlignment(PrefAlign);
    return PrefAlign;
  }
  }

  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // If there is a large requested alignment and we can, bump up the
    // alignment of the global.
    if (GV->isDeclaration()) return Align;

    if (GV->getAlignment() >= PrefAlign)
      return GV->getAlignment();
    // We can only increase the alignment of the global if it has no alignment
    // specified or if it is not assigned a section.  If it is assigned a
    // section, the global could be densely packed with other objects in the
    // section; increasing the alignment could cause padding issues.
    if (!GV->hasSection() || GV->getAlignment() == 0)
      GV->setAlignment(PrefAlign);
    return GV->getAlignment();
  }

  return Align;
}

/// GetOrEnforceKnownAlignment - If the specified pointer has an alignment that
/// we can determine, return it, otherwise return 0.  If PrefAlign is specified,
/// and it is more than the alignment of the ultimate object, see if we can
/// increase the alignment of the ultimate object, making this check succeed.
unsigned InstCombiner::GetOrEnforceKnownAlignment(Value *V,
                                                  unsigned PrefAlign) {
  assert(V->getType()->isPointerTy() &&
         "GetOrEnforceKnownAlignment expects a pointer!");
  unsigned BitWidth = TD ? TD->getPointerSizeInBits() : 64;
  APInt Mask = APInt::getAllOnesValue(BitWidth);
  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  ComputeMaskedBits(V, Mask, KnownZero, KnownOne);
  unsigned TrailZ = KnownZero.countTrailingOnes();

  // Avoid trouble with ridiculously large TrailZ values, such as
  // those computed from a null pointer.
  TrailZ = std::min(TrailZ, unsigned(sizeof(unsigned) * CHAR_BIT - 1));

  unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);
  unsigned MaxAlign = Value::MaximumAlignment;

  // LLVM doesn't support alignments larger than this currently.
  Align = std::min(Align, MaxAlign);

  if (PrefAlign > Align)
    Align = EnforceKnownAlignment(V, Align, PrefAlign);

  // We don't need to make any adjustment.
  return Align;
}

Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
  unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getArgOperand(0));
  unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getArgOperand(1));
  unsigned MinAlign = std::min(DstAlign, SrcAlign);
  unsigned CopyAlign = MI->getAlignment();

  if (CopyAlign < MinAlign) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      MinAlign, false));
    return MI;
  }

  // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
  // load/store.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getArgOperand(2));
  if (MemOpLength == 0) return 0;

  // Source and destination pointer types are always "i8*" for intrinsic.  See
  // if the size is something we can handle with a single primitive load/store.
  // A single load+store correctly handles overlapping memory in the memmove
  // case.
  unsigned Size = MemOpLength->getZExtValue();
  if (Size == 0) return MI;  // Delete this mem transfer.

  if (Size > 8 || (Size&(Size-1)))
    return 0;  // If not 1/2/4/8 bytes, exit.

  // Use an integer load+store unless we can find something better.
  unsigned SrcAddrSp =
    cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
    cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();

  const IntegerType *IntType = IntegerType::get(MI->getContext(), Size<<3);
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);

  // Memcpy forces the use of i8* for the source and destination.  That means
  // that if you're using memcpy to move one double around, you'll get a cast
  // from double* to i8*.  We'd much rather use a double load+store than an
  // i64 load+store here, because this improves the odds that the source or
  // dest address will be promotable.  See if we can find a better type than
  // the integer datatype.
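  // (Illustrative note: for a constant 8-byte memcpy whose i8* operands were
  // produced by casting double* values, the stripping below recovers the
  // double type, so we emit a double load+store instead of an i64 one.)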
  Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
  if (StrippedDest != MI->getArgOperand(0)) {
    const Type *SrcETy = cast<PointerType>(StrippedDest->getType())
                                    ->getElementType();
    if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
      // The SrcETy might be something like {{{double}}} or [1 x double].  Rip
      // down through these levels if so.
      while (!SrcETy->isSingleValueType()) {
        if (const StructType *STy = dyn_cast<StructType>(SrcETy)) {
          if (STy->getNumElements() == 1)
            SrcETy = STy->getElementType(0);
          else
            break;
        } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
          if (ATy->getNumElements() == 1)
            SrcETy = ATy->getElementType();
          else
            break;
        } else
          break;
      }

      if (SrcETy->isSingleValueType()) {
        NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp);
        NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp);
      }
    }
  }


  // If the memcpy/memmove provides better alignment info than we can
  // infer, use it.
  SrcAlign = std::max(SrcAlign, CopyAlign);
  DstAlign = std::max(DstAlign, CopyAlign);

  Value *Src = Builder->CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
  Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
  Instruction *L = new LoadInst(Src, "tmp", MI->isVolatile(), SrcAlign);
  InsertNewInstBefore(L, *MI);
  InsertNewInstBefore(new StoreInst(L, Dest, MI->isVolatile(), DstAlign),
                      *MI);

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setArgOperand(2, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}

Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
  unsigned Alignment = GetOrEnforceKnownAlignment(MI->getDest());
  if (MI->getAlignment() < Alignment) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      Alignment, false));
    return MI;
  }

  // Extract the length and alignment and fill if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return 0;
  uint64_t Len = LenC->getZExtValue();
  Alignment = MI->getAlignment();

  // If the length is zero, this is a no-op.
  if (Len == 0) return MI;  // memset(d,c,0,a) -> noop.

  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    const Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    Dest = Builder->CreateBitCast(Dest, PointerType::getUnqual(ITy));

    // Alignment 0 is identity for alignment 1 for memset, but not store.
    if (Alignment == 0) Alignment = 1;

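    // Multiplying the i8 fill value by 0x0101010101010101 splats the byte
    // into all eight byte lanes (e.g. 0xAB -> 0xABABABABABABABAB);
    // ConstantInt::get then truncates that pattern to the Len-byte type ITy.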
    // Extract the fill value and store.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    InsertNewInstBefore(new StoreInst(ConstantInt::get(ITy, Fill),
                                      Dest, false, Alignment), *MI);

    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return 0;
}

/// visitCallInst - CallInst simplification.  This mostly only handles folding
/// of intrinsic instructions.  For normal calls, it allows visitCallSite to do
/// the heavy lifting.
///
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
  if (isFreeCall(&CI))
    return visitFree(CI);
  if (isMalloc(&CI))
    return visitMalloc(CI);

  // If the caller function is nounwind, mark the call as nounwind, even if the
  // callee isn't.
  if (CI.getParent()->getParent()->doesNotThrow() &&
      !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallSite(&CI);

  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue()) return EraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations.  We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getParent()->getParent()->getParent();
          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
          const Type *Tys[3] = { CI.getArgOperand(0)->getType(),
                                 CI.getArgOperand(1)->getType(),
                                 CI.getArgOperand(2)->getType() };
          CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys, 3));
          Changed = true;
        }
    }

    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return EraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (isa<MemTransferInst>(MI)) {
      if (Instruction *I = SimplifyMemTransfer(MI))
        return I;
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
      if (Instruction *I = SimplifyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }

  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::objectsize: {
    // We need target data for just about everything so depend on it.
    if (!TD) break;

    const Type *ReturnTy = CI.getType();
    bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);

    // Get to the real allocated thing and offset as fast as possible.
    Value *Op1 = II->getArgOperand(0)->stripPointerCasts();

    // If we've stripped down to a single global variable that we
    // can know the size of then just return that.
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1)) {
      if (GV->hasDefinitiveInitializer()) {
        Constant *C = GV->getInitializer();
        uint64_t GlobalSize = TD->getTypeAllocSize(C->getType());
        return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, GlobalSize));
      } else {
        // Can't determine size of the GV.
        Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
        return ReplaceInstUsesWith(CI, RetVal);
      }
    } else if (AllocaInst *AI = dyn_cast<AllocaInst>(Op1)) {
      // Get alloca size.
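      // (An array alloca covers ArraySize * sizeof(element) bytes; the
      // element count must itself be constant for the total to be known.)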
      if (AI->getAllocatedType()->isSized()) {
        uint64_t AllocaSize = TD->getTypeAllocSize(AI->getAllocatedType());
        if (AI->isArrayAllocation()) {
          const ConstantInt *C = dyn_cast<ConstantInt>(AI->getArraySize());
          if (!C) break;
          AllocaSize *= C->getZExtValue();
        }
        return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, AllocaSize));
      }
    } else if (CallInst *MI = extractMallocCall(Op1)) {
      const Type *MallocType = getMallocAllocatedType(MI);
      // Get malloc size.
      if (MallocType && MallocType->isSized()) {
        if (Value *NElems = getMallocArraySize(MI, TD, true)) {
          if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
            return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy,
                (NElements->getZExtValue() * TD->getTypeAllocSize(MallocType))));
        }
      }
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op1)) {
      // Only handle constant GEPs here.
      if (CE->getOpcode() != Instruction::GetElementPtr) break;
      GEPOperator *GEP = cast<GEPOperator>(CE);

      // Make sure we're not a constant offset from an external
      // global.
      Value *Operand = GEP->getPointerOperand();
      Operand = Operand->stripPointerCasts();
      if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Operand))
        if (!GV->hasDefinitiveInitializer()) break;

      // Get what we're pointing to and its size.
      const PointerType *BaseType =
        cast<PointerType>(Operand->getType());
      uint64_t Size = TD->getTypeAllocSize(BaseType->getElementType());

      // Get the current byte offset into the thing.  Use the original
      // operand in case we're looking through a bitcast.
      SmallVector<Value*, 8> Ops(CE->op_begin()+1, CE->op_end());
      const PointerType *OffsetType =
        cast<PointerType>(GEP->getPointerOperand()->getType());
      uint64_t Offset = TD->getIndexedOffset(OffsetType, &Ops[0], Ops.size());

      if (Size < Offset) {
        // Out of bound reference?  Negative index normalized to large
        // index?  Just return "I don't know".
        Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
        return ReplaceInstUsesWith(CI, RetVal);
      }

      Constant *RetVal = ConstantInt::get(ReturnTy, Size-Offset);
      return ReplaceInstUsesWith(CI, RetVal);
    }

    // Do not return "I don't know" here.  Later optimization passes could
    // make it possible to evaluate objectsize to a constant.
    break;
  }
  case Intrinsic::bswap:
    // bswap(bswap(x)) -> x
    if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getArgOperand(0)))
      if (Operand->getIntrinsicID() == Intrinsic::bswap)
        return ReplaceInstUsesWith(CI, Operand->getArgOperand(0));

    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
    if (TruncInst *TI = dyn_cast<TruncInst>(II->getArgOperand(0))) {
      if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(TI->getOperand(0)))
        if (Operand->getIntrinsicID() == Intrinsic::bswap) {
          unsigned C = Operand->getType()->getPrimitiveSizeInBits() -
                       TI->getType()->getPrimitiveSizeInBits();
          Value *CV = ConstantInt::get(Operand->getType(), C);
          Value *V = Builder->CreateLShr(Operand->getArgOperand(0), CV);
          return new TruncInst(V, TI->getType());
        }
    }

    break;
  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
      // powi(x, 1) -> x
      if (Power->isOne())
        return ReplaceInstUsesWith(CI, II->getArgOperand(0));
      // powi(x, -1) -> 1/x
      if (Power->isAllOnesValue())
        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
                                          II->getArgOperand(0));
    }
    break;
  case Intrinsic::cttz: {
    // If all bits below the first known one are known zero,
    // this value is constant.
    const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getArgOperand(0), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned TrailingZeros = KnownOne.countTrailingZeros();
    APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, TrailingZeros)));

  }
  break;
  case Intrinsic::ctlz: {
    // If all bits above the first known one are known zero,
    // this value is constant.
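    // (Worked example: if an i8 operand is known to match 0b00000 1??, the
    // five high bits are known zero and bit 2 is known one, so every possible
    // value 4..7 has exactly five leading zeros and ctlz folds to 5.)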
    const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getArgOperand(0), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned LeadingZeros = KnownOne.countLeadingZeros();
    APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, LeadingZeros)));

  }
  break;
  case Intrinsic::uadd_with_overflow: {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt Mask = APInt::getSignBit(BitWidth);
    APInt LHSKnownZero(BitWidth, 0);
    APInt LHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
    bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
    bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];

    if (LHSKnownNegative || LHSKnownPositive) {
      APInt RHSKnownZero(BitWidth, 0);
      APInt RHSKnownOne(BitWidth, 0);
      ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
      bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
      bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
      if (LHSKnownNegative && RHSKnownNegative) {
        // The sign bit is set in both cases: this MUST overflow.
        // Create a simple add instruction, and insert it into the struct.
        Instruction *Add = BinaryOperator::CreateAdd(LHS, RHS, "", &CI);
        Worklist.Add(Add);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getTrue(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, Add, 0);
      }

      if (LHSKnownPositive && RHSKnownPositive) {
        // The sign bit is clear in both cases: this CANNOT overflow.
        // Create a simple add instruction, and insert it into the struct.
        Instruction *Add = BinaryOperator::CreateNUWAdd(LHS, RHS, "", &CI);
        Worklist.Add(Add);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, Add, 0);
      }
    }
  }
  // FALL THROUGH uadd into sadd
  case Intrinsic::sadd_with_overflow:
    // Canonicalize constants into the RHS.
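    // (e.g. sadd.with.overflow(C, X) becomes sadd.with.overflow(X, C), so the
    // folds below only need to look for a constant on the RHS.)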
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X + undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X + 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getCalledValue()->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // undef - X -> undef
    // X - undef -> undef
    if (isa<UndefValue>(II->getArgOperand(0)) ||
        isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X - 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X * undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X*0 -> {0, false}
      if (RHSI->isZero())
        return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));

      // X * 1 -> {X, false}
      if (RHSI->equalsInt(1)) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::x86_sse_loadu_ps:
  case Intrinsic::x86_sse2_loadu_pd:
  case Intrinsic::x86_sse2_loadu_dq:
    // Turn PPC lvx -> load if the pointer is known aligned.
    // Turn X86 loadups -> load if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getArgOperand(0), 16) >= 16) {
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                          PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
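    // (These intrinsics access a full 16-byte vector, so once the pointer is
    // known to be 16-byte aligned an ordinary store is equivalent.)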
    if (GetOrEnforceKnownAlignment(II->getArgOperand(1), 16) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(0)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
      return new StoreInst(II->getArgOperand(0), Ptr);
    }
    break;
  case Intrinsic::x86_sse_storeu_ps:
  case Intrinsic::x86_sse2_storeu_pd:
  case Intrinsic::x86_sse2_storeu_dq:
    // Turn X86 storeu -> store if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getArgOperand(0), 16) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(1)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
      return new StoreInst(II->getArgOperand(1), Ptr);
    }
    break;

  case Intrinsic::x86_sse_cvttss2si: {
    // These intrinsics only demand the 0th element of their input vector.  If
    // we can simplify the input based on that, do so now.
    unsigned VWidth =
      cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
    APInt DemandedElts(VWidth, 1);
    APInt UndefElts(VWidth, 0);
    if (Value *V = SimplifyDemandedVectorElts(II->getArgOperand(0),
                                              DemandedElts, UndefElts)) {
      II->setArgOperand(0, V);
      return II;
    }
    break;
  }

  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getArgOperand(2))) {
      assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        if (!isa<ConstantInt>(Mask->getOperand(i)) &&
            !isa<UndefValue>(Mask->getOperand(i))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0),
                                            Mask->getType());
        Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1),
                                            Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getOperand(i)))
            continue;
          unsigned Idx=cast<ConstantInt>(Mask->getOperand(i))->getZExtValue();
          Idx &= 31;  // Match the hardware behavior.

          if (ExtractedElts[Idx] == 0) {
            ExtractedElts[Idx] =
              Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
                  ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                   Idx&15, false), "tmp");
          }

          // Insert this value into the result vector.
          Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
                       ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                        i, false), "tmp");
        }
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
      }
    }
    break;

  case Intrinsic::stackrestore: {
    // If the save is right next to the restore, remove the restore.  This can
    // happen when variable allocas are DCE'd.
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        BasicBlock::iterator BI = SS;
        if (&*++BI == II)
          return EraseInstFromFunction(CI);
      }
    }

    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
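    // A later stackrestore makes this one redundant, but an intervening
    // alloca or non-intrinsic call may rely on the stack adjustment this
    // restore performs, so either of those blocks the removal.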
    BasicBlock::iterator BI = II;
    TerminatorInst *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      if (isa<AllocaInst>(BI) || isMalloc(BI)) {
        CannotRemove = true;
        break;
      }
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
          // If there is a stackrestore below this one, remove this one.
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return EraseInstFromFunction(CI);
          // Otherwise, ignore the intrinsic.
        } else {
          // If we found a non-intrinsic call, we can't remove the stack
          // restore.
          CannotRemove = true;
          break;
        }
      }
    }

    // If the stack restore is in a return/unwind block and if there are no
    // allocas or calls between the restore and the return, nuke the restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<UnwindInst>(TI)))
      return EraseInstFromFunction(CI);
    break;
  }
  }

  return visitCallSite(II);
}

// InvokeInst simplification
//
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}

/// isSafeToEliminateVarargsCast - If this cast does not affect the value
/// passed through the varargs area, we can eliminate the use of the cast.
static bool isSafeToEliminateVarargsCast(const CallSite CS,
                                         const CastInst * const CI,
                                         const TargetData * const TD,
                                         const int ix) {
  if (!CI->isLosslessCast())
    return false;

  // The size of ByVal arguments is derived from the type, so we
  // can't change to a type with a different size.  If the size were
  // passed explicitly we could avoid this check.
  if (!CS.paramHasAttr(ix, Attribute::ByVal))
    return true;

  const Type *SrcTy =
    cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  const Type *DstTy = cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
    return false;
  return true;
}

namespace {
class InstCombineFortifiedLibCalls : public SimplifyFortifiedLibCalls {
  InstCombiner *IC;
protected:
  void replaceCall(Value *With) {
    NewInstruction = IC->ReplaceInstUsesWith(*CI, With);
  }
  bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp, bool isString) const {
    if (ConstantInt *SizeCI =
            dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp))) {
      if (SizeCI->isAllOnesValue())
        return true;
      if (isString)
        return SizeCI->getZExtValue() >=
               GetStringLength(CI->getArgOperand(SizeArgOp));
      if (ConstantInt *Arg = dyn_cast<ConstantInt>(
              CI->getArgOperand(SizeArgOp)))
        return SizeCI->getZExtValue() >= Arg->getZExtValue();
    }
    return false;
  }
public:
  InstCombineFortifiedLibCalls(InstCombiner *IC) : IC(IC), NewInstruction(0) { }
  Instruction *NewInstruction;
};
} // end anonymous namespace

// Try to fold some different types of calls here.
// Currently we're only working with the checking functions: memcpy_chk,
// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
// strcat_chk and strncat_chk.
Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const TargetData *TD) {
  if (CI->getCalledFunction() == 0) return 0;

  InstCombineFortifiedLibCalls Simplifier(this);
  Simplifier.fold(CI, TD);
  return Simplifier.NewInstruction;
}

// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
  bool Changed = false;

  // If the callee is a constexpr cast of a function, attempt to move the cast
  // to the arguments of the call/invoke.
  if (transformConstExprCastCall(CS)) return 0;

  Value *Callee = CS.getCalledValue();

  if (Function *CalleeF = dyn_cast<Function>(Callee))
    // If the call and callee calling conventions don't match, this call must
    // be unreachable, as the call is undefined.
    if (CalleeF->getCallingConv() != CS.getCallingConv() &&
        // Only do this for calls to a function with a body.  A prototype may
        // not actually end up matching the implementation's calling conv for a
        // variety of reasons (e.g. it may be written in assembly).
        !CalleeF->isDeclaration()) {
      Instruction *OldCall = CS.getInstruction();
      new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                    OldCall);
      // If OldCall does not return void then replaceAllUsesWith undef.
      // This allows ValueHandlers and custom metadata to adjust themselves.
      if (!OldCall->getType()->isVoidTy())
        OldCall->replaceAllUsesWith(UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))
        return EraseInstFromFunction(*OldCall);

      // We cannot remove an invoke, because it would change the CFG, just
      // change the callee to a null pointer.
      cast<InvokeInst>(OldCall)->setCalledFunction(
                                    Constant::getNullValue(CalleeF->getType()));
      return 0;
    }

  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
    // This instruction is not reachable, just remove it.  We insert a store to
    // undef so that we know that this code is not reachable, despite the fact
    // that we can't modify the CFG here.
    new StoreInst(ConstantInt::getTrue(Callee->getContext()),
               UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                  CS.getInstruction());

    // If CS does not return void then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust themselves.
    if (!CS.getInstruction()->getType()->isVoidTy())
      CS.getInstruction()->
        replaceAllUsesWith(UndefValue::get(CS.getInstruction()->getType()));

    if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      // Don't break the CFG, insert a dummy cond branch.
      BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
                         ConstantInt::getTrue(Callee->getContext()), II);
    }
    return EraseInstFromFunction(*CS.getInstruction());
  }

  if (BitCastInst *BC = dyn_cast<BitCastInst>(Callee))
    if (IntrinsicInst *In = dyn_cast<IntrinsicInst>(BC->getOperand(0)))
      if (In->getIntrinsicID() == Intrinsic::init_trampoline)
        return transformCallThroughTrampoline(CS);

  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams() + (isa<InvokeInst>(Callee) ? 3 : 1);
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
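    // (For example, a lossless bitcast on a non-byval vararg pointer can be
    // stripped, since the callee reads the same bits out of the va_arg area.)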
    for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
           E = CS.arg_end(); I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }

  // Try to optimize the call if possible; we require TargetData for most of
  // this.  None of these calls are seen as possibly dead so go ahead and
  // delete the instruction now.
  if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
    Instruction *I = tryOptimizeCall(CI, TD);
    // If we changed something return the result, etc.  Otherwise let
    // the fallthrough checks run.
    if (I) return EraseInstFromFunction(*I);
  }

  return Changed ? CS.getInstruction() : 0;
}

// transformConstExprCastCall - If the callee is a constexpr cast of a function,
// attempt to move the cast to the arguments of the call/invoke.
//
bool InstCombiner::transformConstExprCastCall(CallSite CS) {
  if (!isa<ConstantExpr>(CS.getCalledValue())) return false;
  ConstantExpr *CE = cast<ConstantExpr>(CS.getCalledValue());
  if (CE->getOpcode() != Instruction::BitCast ||
      !isa<Function>(CE->getOperand(0)))
    return false;
  Function *Callee = cast<Function>(CE->getOperand(0));
  Instruction *Caller = CS.getInstruction();
  const AttrListPtr &CallerPAL = CS.getAttributes();

  // Okay, this is a cast from a function to a different type.  Unless doing so
  // would cause a type conversion of one of our arguments, change this call to
  // be a direct call with arguments cast to the appropriate types.
  //
  const FunctionType *FT = Callee->getFunctionType();
  const Type *OldRetTy = Caller->getType();
  const Type *NewRetTy = FT->getReturnType();

  if (NewRetTy->isStructTy())
    return false;  // TODO: Handle multiple return values.

  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {
    if (Callee->isDeclaration() &&
        // Conversion is ok if changing from one pointer type to another or
        // from a pointer to an integer of the same size.
        !((OldRetTy->isPointerTy() || !TD ||
           OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
          (NewRetTy->isPointerTy() || !TD ||
           NewRetTy == TD->getIntPtrType(Caller->getContext()))))
      return false;   // Cannot transform this return value.

    if (!Caller->use_empty() &&
        // void -> non-void is handled specially.
        !NewRetTy->isVoidTy() && !CastInst::isCastable(NewRetTy, OldRetTy))
      return false;   // Cannot transform this return value.

    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
      Attributes RAttrs = CallerPAL.getRetAttributes();
      if (RAttrs & Attribute::typeIncompatible(NewRetTy))
        return false;   // Attribute not compatible with transformed value.
    }

    // If the callsite is an invoke instruction, and the return value is used
    // by a PHI node in a successor, we cannot change the return type of the
    // call because there is no place to put the cast instruction (without
    // breaking the critical edge).  Bail out in this case.
    if (!Caller->use_empty())
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
        for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
             UI != E; ++UI)
          if (PHINode *PN = dyn_cast<PHINode>(*UI))
            if (PN->getParent() == II->getNormalDest() ||
                PN->getParent() == II->getUnwindDest())
              return false;
  }

  unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  CallSite::arg_iterator AI = CS.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    const Type *ActTy = (*AI)->getType();

    if (!CastInst::isCastable(ActTy, ParamTy))
      return false;   // Cannot transform this parameter value.

    if (CallerPAL.getParamAttributes(i + 1)
        & Attribute::typeIncompatible(ParamTy))
      return false;   // Attribute not compatible with transformed value.

    // Converting from one pointer type to another or between a pointer and an
    // integer of the same size is safe even if we do not have a body.
    bool isConvertible = ActTy == ParamTy ||
      (TD && ((ParamTy->isPointerTy() ||
               ParamTy == TD->getIntPtrType(Caller->getContext())) &&
              (ActTy->isPointerTy() ||
               ActTy == TD->getIntPtrType(Caller->getContext()))));
    if (Callee->isDeclaration() && !isConvertible) return false;
  }

  if (FT->getNumParams() < NumActualArgs && !FT->isVarArg() &&
      Callee->isDeclaration())
    return false;   // Do not delete arguments unless we have a function body.

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty())
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them.  Check that these extra arguments have
    // attributes that are compatible with being a vararg call argument.
    for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
      if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
        break;
      Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
      if (PAttrs & Attribute::VarArgsIncompatible)
        return false;
    }

  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary...
  std::vector<Value*> Args;
  Args.reserve(NumActualArgs);
  SmallVector<AttributeWithIndex, 8> attrVec;
  attrVec.reserve(NumCommonArgs);

  // Get any return attributes.
  Attributes RAttrs = CallerPAL.getRetAttributes();

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes.  Wipe out any problematic attributes.
  RAttrs &= ~Attribute::typeIncompatible(NewRetTy);

  // Add the new return attributes.
  if (RAttrs)
    attrVec.push_back(AttributeWithIndex::get(0, RAttrs));

  AI = CS.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    if ((*AI)->getType() == ParamTy) {
      Args.push_back(*AI);
    } else {
      Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
          false, ParamTy, false);
      Args.push_back(Builder->CreateCast(opcode, *AI, ParamTy, "tmp"));
    }

    // Add any parameter attributes.
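    // (In this attribute list, index 0 holds the return-value attributes,
    // parameters are 1-based, and ~0 holds the function attributes.)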
    if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
      attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
  }

  // If the function takes more arguments than the call was taking, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));

  // If we are removing arguments to the function, emit an obnoxious warning.
  if (FT->getNumParams() < NumActualArgs) {
    if (!FT->isVarArg()) {
      errs() << "WARNING: While resolving call to function '"
             << Callee->getName() << "' arguments were dropped!\n";
    } else {
      // Add all of the arguments in their promoted form to the arg list.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        const Type *PTy = getPromotedType((*AI)->getType());
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through va_arg area!
          Instruction::CastOps opcode =
            CastInst::getCastOpcode(*AI, false, PTy, false);
          Args.push_back(Builder->CreateCast(opcode, *AI, PTy, "tmp"));
        } else {
          Args.push_back(*AI);
        }

        // Add any parameter attributes.
        if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
          attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
      }
    }
  }

  if (Attributes FnAttrs = CallerPAL.getFnAttributes())
    attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));

  if (NewRetTy->isVoidTy())
    Caller->setName("");   // Void type should not have a name.

  const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),
                                                     attrVec.end());

  Instruction *NC;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NC = InvokeInst::Create(Callee, II->getNormalDest(), II->getUnwindDest(),
                            Args.begin(), Args.end(),
                            Caller->getName(), Caller);
    cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
    cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
  } else {
    NC = CallInst::Create(Callee, Args.begin(), Args.end(),
                          Caller->getName(), Caller);
    CallInst *CI = cast<CallInst>(Caller);
    if (CI->isTailCall())
      cast<CallInst>(NC)->setTailCall();
    cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
    cast<CallInst>(NC)->setAttributes(NewCallerPAL);
  }

  // Insert a cast of the return type as necessary.
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      Instruction::CastOps opcode = CastInst::getCastOpcode(NC, false,
                                                            OldRetTy, false);
      NV = NC = CastInst::Create(opcode, NC, OldRetTy, "tmp");

      // If this is an invoke instruction, we should insert it after the first
      // non-phi instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstNonPHI();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call; just insert the cast right after the call
        // instruction.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.AddUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }


  if (!Caller->use_empty())
    Caller->replaceAllUsesWith(NV);

  EraseInstFromFunction(*Caller);
  return true;
}

// transformCallThroughTrampoline - Turn a call to a function created by the
// init_trampoline intrinsic into a direct call to the underlying function.
//
Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
  Value *Callee = CS.getCalledValue();
  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  const AttrListPtr &Attrs = CS.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return 0;

  IntrinsicInst *Tramp =
    cast<IntrinsicInst>(cast<BitCastInst>(Callee)->getOperand(0));

  Function *NestF =cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
  const PointerType *NestFPTy = cast<PointerType>(NestF->getType());
  const FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());

  const AttrListPtr &NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestIdx = 1;
    const Type *NestTy = 0;
    Attributes NestAttr = Attribute::None;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
         E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
      if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = NestAttrs.getParamAttributes(NestIdx);
        break;
      }

    if (NestTy) {
      Instruction *Caller = CS.getInstruction();
      std::vector<Value*> NewArgs;
      NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);

      SmallVector<AttributeWithIndex, 8> NewAttrs;
      NewAttrs.reserve(Attrs.getNumSlots() + 1);

      // Insert the nest argument into the call argument list, which may
      // mean appending it.  Likewise for attributes.

      // Add any result attributes.
      if (Attributes Attr = Attrs.getRetAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(0, Attr));

      {
        unsigned Idx = 1;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        do {
          if (Idx == NestIdx) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp->getArgOperand(2);
            if (NestVal->getType() != NestTy)
              NestVal = new BitCastInst(NestVal, NestTy, "nest", Caller);
            NewArgs.push_back(NestVal);
            NewAttrs.push_back(AttributeWithIndex::get(NestIdx, NestAttr));
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
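          // Arguments at or beyond the nest slot shift up by one position,
          // hence the (Idx >= NestIdx) adjustment to the attribute index.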
          NewArgs.push_back(*I);
          if (Attributes Attr = Attrs.getParamAttributes(Idx))
            NewAttrs.push_back
              (AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr));

          ++Idx, ++I;
        } while (1);
      }

      // Add any function attributes.
      if (Attributes Attr = Attrs.getFnAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(~0, Attr));

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<const Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned Idx = 1;
        FunctionType::param_iterator I = FTy->param_begin(),
          E = FTy->param_end();

        do {
          if (Idx == NestIdx)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++Idx, ++I;
        } while (1);
      }

      // Replace the trampoline call with a direct call.  Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
        NestF->getType() == PointerType::getUnqual(NewFTy) ?
        NestF : ConstantExpr::getBitCast(NestF,
                                         PointerType::getUnqual(NewFTy));
      const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),
                                                   NewAttrs.end());

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        NewCaller = InvokeInst::Create(NewCallee,
                                       II->getNormalDest(), II->getUnwindDest(),
                                       NewArgs.begin(), NewArgs.end(),
                                       Caller->getName(), Caller);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewCallee, NewArgs.begin(), NewArgs.end(),
                                     Caller->getName(), Caller);
        if (cast<CallInst>(Caller)->isTailCall())
          cast<CallInst>(NewCaller)->setTailCall();
        cast<CallInst>(NewCaller)->
          setCallingConv(cast<CallInst>(Caller)->getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }
      if (!Caller->getType()->isVoidTy())
        Caller->replaceAllUsesWith(NewCaller);
      Caller->eraseFromParent();
      Worklist.Remove(Caller);
      return 0;
    }
  }

  // Replace the trampoline call with a direct call.  Since there is no 'nest'
  // parameter, there is no need to adjust the argument list.  Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee =
    NestF->getType() == PTy ? NestF :
                              ConstantExpr::getBitCast(NestF, PTy);
  CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}