InstCombineCalls.cpp revision 26b482d7a76df3f67675ce852daed0eba709c63e
//===- InstCombineCalls.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall and visitInvoke functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
using namespace llvm;

/// getPromotedType - Return the specified type promoted as it would be to pass
/// through a va_arg area.
static const Type *getPromotedType(const Type *Ty) {
  if (const IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}

/// EnforceKnownAlignment - If the specified pointer points to an object that
/// we control, modify the object's alignment to PrefAlign. This isn't
/// often possible though. If alignment is important, a more reliable approach
/// is to simply align all global variables and allocation instructions to
/// their preferred alignment from the beginning.
///
static unsigned EnforceKnownAlignment(Value *V,
                                      unsigned Align, unsigned PrefAlign) {

  User *U = dyn_cast<User>(V);
  if (!U) return Align;

  switch (Operator::getOpcode(U)) {
  default: break;
  case Instruction::BitCast:
    return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
  case Instruction::GetElementPtr: {
    // If all indexes are zero, it is just the alignment of the base pointer.
    bool AllZeroOperands = true;
    for (User::op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e; ++i)
      if (!isa<Constant>(*i) ||
          !cast<Constant>(*i)->isNullValue()) {
        AllZeroOperands = false;
        break;
      }

    if (AllZeroOperands) {
      // Treat this like a bitcast.
      return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
    }
    return Align;
  }
  case Instruction::Alloca: {
    AllocaInst *AI = cast<AllocaInst>(V);
    // If there is a requested alignment and if this is an alloca, round up.
    if (AI->getAlignment() >= PrefAlign)
      return AI->getAlignment();
    AI->setAlignment(PrefAlign);
    return PrefAlign;
  }
  }

  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // If there is a large requested alignment and we can, bump up the
    // alignment of the global.
    if (GV->isDeclaration()) return Align;

    if (GV->getAlignment() >= PrefAlign)
      return GV->getAlignment();
    // We can only increase the alignment of the global if it has no alignment
    // specified or if it is not assigned a section. If it is assigned a
    // section, the global could be densely packed with other objects in the
    // section, and increasing the alignment could cause padding issues.
    if (!GV->hasSection() || GV->getAlignment() == 0)
      GV->setAlignment(PrefAlign);
    return GV->getAlignment();
  }

  return Align;
}
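
// Illustrative example (hypothetical IR): with PrefAlign = 16, a pointer that
// reaches back to
//   %buf = alloca [4 x float], align 4
// is handled by the Alloca case above: the alloca is rewritten in place to
//   %buf = alloca [4 x float], align 16
// and EnforceKnownAlignment returns 16.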

/// GetOrEnforceKnownAlignment - If the specified pointer has an alignment that
/// we can determine, return it, otherwise return 0. If PrefAlign is specified,
/// and it is more than the alignment of the ultimate object, see if we can
/// increase the alignment of the ultimate object, making this check succeed.
unsigned InstCombiner::GetOrEnforceKnownAlignment(Value *V,
                                                  unsigned PrefAlign) {
  assert(V->getType()->isPointerTy() &&
         "GetOrEnforceKnownAlignment expects a pointer!");
  unsigned BitWidth = TD ? TD->getPointerSizeInBits() : 64;
  APInt Mask = APInt::getAllOnesValue(BitWidth);
  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  ComputeMaskedBits(V, Mask, KnownZero, KnownOne);
  unsigned TrailZ = KnownZero.countTrailingOnes();

  // Avoid trouble with ridiculously large TrailZ values, such as
  // those computed from a null pointer.
  TrailZ = std::min(TrailZ, unsigned(sizeof(unsigned) * CHAR_BIT - 1));

  unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);

  // LLVM doesn't support alignments larger than this currently.
  Align = std::min(Align, +Value::MaximumAlignment);

  if (PrefAlign > Align)
    Align = EnforceKnownAlignment(V, Align, PrefAlign);

  // We don't need to make any adjustment.
  return Align;
}

Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
  unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getArgOperand(0));
  unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getArgOperand(1));
  unsigned MinAlign = std::min(DstAlign, SrcAlign);
  unsigned CopyAlign = MI->getAlignment();

  if (CopyAlign < MinAlign) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      MinAlign, false));
    return MI;
  }

  // If the transfer length is 1/2/4/8 bytes, replace the memcpy/memmove with
  // a load/store pair.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getArgOperand(2));
  if (MemOpLength == 0) return 0;

  // Source and destination pointer types are always "i8*" for the intrinsic.
  // See if the size is something we can handle with a single primitive
  // load/store. A single load+store correctly handles overlapping memory in
  // the memmove case.
  unsigned Size = MemOpLength->getZExtValue();
  if (Size == 0) return MI;  // Delete this mem transfer.

  if (Size > 8 || (Size&(Size-1)))
    return 0;  // If not 1/2/4/8 bytes, exit.

  // Use an integer load+store unless we can find something better.
  unsigned SrcAddrSp =
    cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
    cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();

  const IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);

  // Memcpy forces the use of i8* for the source and destination. That means
  // that if you're using memcpy to move one double around, you'll get a cast
  // from double* to i8*. We'd much rather use a double load+store than an
  // i64 load+store here, because this improves the odds that the source or
  // dest address will be promotable. See if we can find a better type than
  // the integer datatype.
  Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
  if (StrippedDest != MI->getArgOperand(0)) {
    const Type *SrcETy = cast<PointerType>(StrippedDest->getType())
                           ->getElementType();
    if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
      // The SrcETy might be something like {{{double}}} or [1 x double]. Rip
      // down through these levels if so.
      while (!SrcETy->isSingleValueType()) {
        if (const StructType *STy = dyn_cast<StructType>(SrcETy)) {
          if (STy->getNumElements() == 1)
            SrcETy = STy->getElementType(0);
          else
            break;
        } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
          if (ATy->getNumElements() == 1)
            SrcETy = ATy->getElementType();
          else
            break;
        } else
          break;
      }

      if (SrcETy->isSingleValueType()) {
        NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp);
        NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp);
      }
    }
  }


  // If the memcpy/memmove provides better alignment info than we can
  // infer, use it.
  SrcAlign = std::max(SrcAlign, CopyAlign);
  DstAlign = std::max(DstAlign, CopyAlign);

  Value *Src = Builder->CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
  Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
  Instruction *L = new LoadInst(Src, "tmp", MI->isVolatile(), SrcAlign);
  InsertNewInstBefore(L, *MI);
  InsertNewInstBefore(new StoreInst(L, Dest, MI->isVolatile(), DstAlign),
                      *MI);

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setArgOperand(2, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}
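
// Illustrative example (hypothetical IR): an 8-byte copy between double
// pointers such as
//   %dc = bitcast double* %d0 to i8*
//   %sc = bitcast double* %s0 to i8*
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dc, i8* %sc, i64 8, i32 8,
//                                        i1 false)
// becomes a load/store pair of the stripped element type:
//   %v = load double* %s0, align 8
//   store double %v, double* %d0, align 8
// and the intrinsic's length is zeroed so the call itself is erased on the
// next InstCombine iteration.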

Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
  unsigned Alignment = GetOrEnforceKnownAlignment(MI->getDest());
  if (MI->getAlignment() < Alignment) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      Alignment, false));
    return MI;
  }

  // Extract the length and alignment and fill if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return 0;
  uint64_t Len = LenC->getZExtValue();
  Alignment = MI->getAlignment();

  // If the length is zero, this is a no-op.
  if (Len == 0) return MI;  // memset(d,c,0,a) -> noop

  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    const Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    Dest = Builder->CreateBitCast(Dest, PointerType::getUnqual(ITy));

    // For memset, alignment 0 means the same as alignment 1, but store does
    // not accept 0, so normalize it.
    if (Alignment == 0) Alignment = 1;

    // Extract the fill value and store.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    InsertNewInstBefore(new StoreInst(ConstantInt::get(ITy, Fill),
                                      Dest, false, Alignment), *MI);

    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return 0;
}
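
// Illustrative example (hypothetical IR): the fill byte is splatted by
// multiplying with 0x0101010101010101, so
//   call void @llvm.memset.p0i8.i64(i8* %p, i8 1, i64 4, i32 4, i1 false)
// becomes a single store of the splatted constant:
//   %pi = bitcast i8* %p to i32*
//   store i32 16843009, i32* %pi, align 4    ; 16843009 == 0x01010101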

/// visitCallInst - CallInst simplification. This mostly only handles folding
/// of intrinsic instructions. For normal calls, it allows visitCallSite to do
/// the heavy lifting.
///
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
  if (isFreeCall(&CI))
    return visitFree(CI);
  if (isMalloc(&CI))
    return visitMalloc(CI);

  // If the caller function is nounwind, mark the call as nounwind, even if the
  // callee isn't.
  if (CI.getParent()->getParent()->doesNotThrow() &&
      !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallSite(&CI);

  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue())
        return EraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations. We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // No other transformations apply to volatile transfers.
    if (MI->isVolatile())
      return 0;

    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getParent()->getParent()->getParent();
          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
          const Type *Tys[3] = { CI.getArgOperand(0)->getType(),
                                 CI.getArgOperand(1)->getType(),
                                 CI.getArgOperand(2)->getType() };
          CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys, 3));
          Changed = true;
        }
    }

    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return EraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (isa<MemTransferInst>(MI)) {
      if (Instruction *I = SimplifyMemTransfer(MI))
        return I;
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
      if (Instruction *I = SimplifyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }
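
  // Illustrative example (hypothetical IR): a constant global can never be
  // written through the destination of a legal memmove, so a call such as
  //   call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst,
  //          i8* getelementptr ([8 x i8]* @const_str, i64 0, i64 0),
  //          i64 %n, i32 1, i1 false)
  // where @const_str is declared 'constant' is rewritten above into the
  // corresponding @llvm.memcpy intrinsic with the same operands.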

  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::objectsize: {
    // We need target data for just about everything here, so bail out if it
    // is unavailable.
    if (!TD) break;

    const Type *ReturnTy = CI.getType();
    bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);

    // Get to the real allocated thing and offset as fast as possible.
    Value *Op1 = II->getArgOperand(0)->stripPointerCasts();

    // If we've stripped down to a single global variable that we
    // can know the size of then just return that.
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1)) {
      if (GV->hasDefinitiveInitializer()) {
        Constant *C = GV->getInitializer();
        uint64_t GlobalSize = TD->getTypeAllocSize(C->getType());
        return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, GlobalSize));
      } else {
        // Can't determine size of the GV.
        Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
        return ReplaceInstUsesWith(CI, RetVal);
      }
    } else if (AllocaInst *AI = dyn_cast<AllocaInst>(Op1)) {
      // Get alloca size.
      if (AI->getAllocatedType()->isSized()) {
        uint64_t AllocaSize = TD->getTypeAllocSize(AI->getAllocatedType());
        if (AI->isArrayAllocation()) {
          const ConstantInt *C = dyn_cast<ConstantInt>(AI->getArraySize());
          if (!C) break;
          AllocaSize *= C->getZExtValue();
        }
        return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, AllocaSize));
      }
    } else if (CallInst *MI = extractMallocCall(Op1)) {
      const Type* MallocType = getMallocAllocatedType(MI);
      // Get the malloc'ed size.
      if (MallocType && MallocType->isSized()) {
        if (Value *NElems = getMallocArraySize(MI, TD, true)) {
          if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
            return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy,
              (NElements->getZExtValue() * TD->getTypeAllocSize(MallocType))));
        }
      }
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op1)) {
      // Only handle constant GEPs here.
      if (CE->getOpcode() != Instruction::GetElementPtr) break;
      GEPOperator *GEP = cast<GEPOperator>(CE);

      // Make sure we're not a constant offset from an external
      // global.
      Value *Operand = GEP->getPointerOperand();
      Operand = Operand->stripPointerCasts();
      if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Operand))
        if (!GV->hasDefinitiveInitializer()) break;

      // Get what we're pointing to and its size.
      const PointerType *BaseType =
        cast<PointerType>(Operand->getType());
      uint64_t Size = TD->getTypeAllocSize(BaseType->getElementType());

      // Get the current byte offset into the thing. Use the original
      // operand in case we're looking through a bitcast.
      SmallVector<Value*, 8> Ops(CE->op_begin()+1, CE->op_end());
      const PointerType *OffsetType =
        cast<PointerType>(GEP->getPointerOperand()->getType());
      uint64_t Offset = TD->getIndexedOffset(OffsetType, &Ops[0], Ops.size());

      if (Size < Offset) {
        // Out-of-bounds reference? Negative index normalized to a large
        // index? Just return "I don't know".
        Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
        return ReplaceInstUsesWith(CI, RetVal);
      }

      Constant *RetVal = ConstantInt::get(ReturnTy, Size-Offset);
      return ReplaceInstUsesWith(CI, RetVal);
    }

    // Do not return "I don't know" here. Later optimization passes could
    // make it possible to evaluate objectsize to a constant.
    break;
  }
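
  // Illustrative example (hypothetical IR): for
  //   @g = global [10 x i8] zeroinitializer
  //   %s = call i32 @llvm.objectsize.i32(i8* getelementptr ([10 x i8]* @g,
  //                                      i32 0, i32 3), i1 false)
  // the constant-GEP path above computes Size = 10 and Offset = 3, so %s is
  // replaced by the constant 7.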
  case Intrinsic::bswap:
    // bswap(bswap(x)) -> x
    if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getArgOperand(0)))
      if (Operand->getIntrinsicID() == Intrinsic::bswap)
        return ReplaceInstUsesWith(CI, Operand->getArgOperand(0));

    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
    if (TruncInst *TI = dyn_cast<TruncInst>(II->getArgOperand(0))) {
      if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(TI->getOperand(0)))
        if (Operand->getIntrinsicID() == Intrinsic::bswap) {
          unsigned C = Operand->getType()->getPrimitiveSizeInBits() -
                       TI->getType()->getPrimitiveSizeInBits();
          Value *CV = ConstantInt::get(Operand->getType(), C);
          Value *V = Builder->CreateLShr(Operand->getArgOperand(0), CV);
          return new TruncInst(V, TI->getType());
        }
    }

    break;
  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
      // powi(x, 1) -> x
      if (Power->isOne())
        return ReplaceInstUsesWith(CI, II->getArgOperand(0));
      // powi(x, -1) -> 1/x
      if (Power->isAllOnesValue())
        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
                                          II->getArgOperand(0));
    }
    break;
  case Intrinsic::cttz: {
    // If all bits below the first known one are known zero,
    // this value is constant.
    const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getArgOperand(0), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned TrailingZeros = KnownOne.countTrailingZeros();
    APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, TrailingZeros)));

  }
  break;
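
  // Illustrative example (hypothetical IR): if the operand is known to be of
  // the form
  //   %t = shl i32 %y, 4
  //   %x = or i32 %t, 16
  // then bits 0-3 are known zero and bit 4 is known one, so
  // @llvm.cttz.i32(%x) folds to the constant 4. The ctlz case below is the
  // mirror image, using the high bits instead.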
  case Intrinsic::ctlz: {
    // If all bits above the first known one are known zero,
    // this value is constant.
    const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getArgOperand(0), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned LeadingZeros = KnownOne.countLeadingZeros();
    APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, LeadingZeros)));

  }
  break;
  case Intrinsic::uadd_with_overflow: {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt Mask = APInt::getSignBit(BitWidth);
    APInt LHSKnownZero(BitWidth, 0);
    APInt LHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
    bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
    bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];

    if (LHSKnownNegative || LHSKnownPositive) {
      APInt RHSKnownZero(BitWidth, 0);
      APInt RHSKnownOne(BitWidth, 0);
      ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
      bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
      bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
      if (LHSKnownNegative && RHSKnownNegative) {
        // The sign bit is set in both cases: this MUST overflow.
        // Create a simple add instruction, and insert it into the struct.
        Instruction *Add = BinaryOperator::CreateAdd(LHS, RHS, "", &CI);
        Worklist.Add(Add);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getTrue(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, Add, 0);
      }

      if (LHSKnownPositive && RHSKnownPositive) {
        // The sign bit is clear in both cases: this CANNOT overflow.
        // Create a simple add instruction, and insert it into the struct.
        Instruction *Add = BinaryOperator::CreateNUWAdd(LHS, RHS, "", &CI);
        Worklist.Add(Add);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, Add, 0);
      }
    }

    // If the normal result of the add is dead, and the RHS is a constant, we
    // can transform this into a range comparison.
    // overflow = uadd a, -4  -->  overflow = icmp ugt a, 3
    if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS))
      if (ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(II->use_back()))
        if (II->hasOneUse() && EVI->getNumIndices() == 1 && !EVI->use_empty() &&
            *EVI->idx_begin() == 1) {  // Extract of overflow result.
          Builder->SetInsertPoint(EVI);
          Value *R = Builder->CreateICmpUGT(LHS, ConstantExpr::getNot(CI));
          R->takeName(EVI);
          ReplaceInstUsesWith(*EVI, R);
          return II;
        }

  }
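
  // Illustrative example (hypothetical IR): when both operands have their
  // sign bit known set, e.g.
  //   %a = or i32 %x, -2147483648
  //   %b = or i32 %y, -2147483648
  //   %r = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  // the unsigned add is guaranteed to wrap, so the case above rewrites %r as
  // a plain add whose result is inserted into the struct {i32 undef, i1 true}.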
  // FALL THROUGH uadd into sadd
  case Intrinsic::sadd_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X + undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X + 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }

    break;
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // undef - X -> undef
    // X - undef -> undef
    if (isa<UndefValue>(II->getArgOperand(0)) ||
        isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X - 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X * undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X*0 -> {0, false}
      if (RHSI->isZero())
        return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));

      // X * 1 -> {X, false}
      if (RHSI->equalsInt(1)) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::x86_sse_loadu_ps:
  case Intrinsic::x86_sse2_loadu_pd:
  case Intrinsic::x86_sse2_loadu_dq:
    // Turn PPC lvx -> load if the pointer is known aligned.
    // Turn X86 loadups -> load if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getArgOperand(0), 16) >= 16) {
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                       PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
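
  // Illustrative example (hypothetical IR): if %p can be proven (or promoted)
  // to be 16-byte aligned, then
  //   %v = call <4 x float> @llvm.x86.sse.loadu.ps(i8* %p)
  // is rewritten above into an ordinary vector load:
  //   %q = bitcast i8* %p to <4 x float>*
  //   %v = load <4 x float>* %q
  // The aligned-store cases below apply the same idea in reverse.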
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getArgOperand(1), 16) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(0)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
      return new StoreInst(II->getArgOperand(0), Ptr);
    }
    break;
  case Intrinsic::x86_sse_storeu_ps:
  case Intrinsic::x86_sse2_storeu_pd:
  case Intrinsic::x86_sse2_storeu_dq:
    // Turn X86 storeu -> store if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getArgOperand(0), 16) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(1)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
      return new StoreInst(II->getArgOperand(1), Ptr);
    }
    break;

  case Intrinsic::x86_sse_cvttss2si: {
    // This intrinsic only demands the 0th element of its input vector. If
    // we can simplify the input based on that, do so now.
    unsigned VWidth =
      cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
    APInt DemandedElts(VWidth, 1);
    APInt UndefElts(VWidth, 0);
    if (Value *V = SimplifyDemandedVectorElts(II->getArgOperand(0),
                                              DemandedElts, UndefElts)) {
      II->setArgOperand(0, V);
      return II;
    }
    break;
  }
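
  // Illustrative example (hypothetical IR): only lane 0 of the cvttss2si
  // operand handled above matters, so for an input such as
  //   %v = insertelement <4 x float> %w, float %f, i32 3
  //   %r = call i32 @llvm.x86.sse.cvttss2si(<4 x float> %v)
  // SimplifyDemandedVectorElts replaces the operand with %w, since the insert
  // into lane 3 is dead as far as this call is concerned.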

  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getArgOperand(2))) {
      assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        if (!isa<ConstantInt>(Mask->getOperand(i)) &&
            !isa<UndefValue>(Mask->getOperand(i))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0),
                                            Mask->getType());
        Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1),
                                            Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getOperand(i)))
            continue;
          unsigned Idx=cast<ConstantInt>(Mask->getOperand(i))->getZExtValue();
          Idx &= 31;  // Match the hardware behavior.

          if (ExtractedElts[Idx] == 0) {
            ExtractedElts[Idx] =
              Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
                  ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                   Idx&15, false), "tmp");
          }

          // Insert this value into the result vector.
          Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
                       ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                        i, false), "tmp");
        }
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
      }
    }
    break;

  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane:
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    unsigned MemAlign = GetOrEnforceKnownAlignment(II->getArgOperand(0));
    unsigned AlignArg = II->getNumArgOperands() - 1;
    ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
    if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
      II->setArgOperand(AlignArg,
                        ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                         MemAlign, false));
      return II;
    }
    break;
  }

  case Intrinsic::stackrestore: {
    // If the save is right next to the restore, remove the restore. This can
    // happen when variable allocas are DCE'd.
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        BasicBlock::iterator BI = SS;
        if (&*++BI == II)
          return EraseInstFromFunction(CI);
      }
    }

    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
    BasicBlock::iterator BI = II;
    TerminatorInst *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      if (isa<AllocaInst>(BI) || isMalloc(BI)) {
        CannotRemove = true;
        break;
      }
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
          // If there is a stackrestore below this one, remove this one.
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return EraseInstFromFunction(CI);
          // Otherwise, ignore the intrinsic.
        } else {
          // If we found a non-intrinsic call, we can't remove the stack
          // restore.
          CannotRemove = true;
          break;
        }
      }
    }

    // If the stack restore is in a return/unwind block and if there are no
    // allocas or calls between the restore and the return, nuke the restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<UnwindInst>(TI)))
      return EraseInstFromFunction(CI);
    break;
  }
  }

  return visitCallSite(II);
}

// InvokeInst simplification
//
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}

/// isSafeToEliminateVarargsCast - If this cast does not affect the value
/// passed through the varargs area, we can eliminate the use of the cast.
static bool isSafeToEliminateVarargsCast(const CallSite CS,
                                         const CastInst * const CI,
                                         const TargetData * const TD,
                                         const int ix) {
  if (!CI->isLosslessCast())
    return false;

  // The size of ByVal arguments is derived from the type, so we
  // can't change to a type with a different size. If the size were
  // passed explicitly we could avoid this check.
  if (!CS.paramHasAttr(ix, Attribute::ByVal))
    return true;

  const Type* SrcTy =
    cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  const Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
    return false;
  return true;
}
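
// Illustrative example (hypothetical IR): for a varargs call such as
//   %p = bitcast %struct.foo* %s to i8*
//   call void (i8*, ...)* @printf_like(i8* %fmt, i8* %p)
// the bitcast feeding the varargs slot is lossless and the argument is not
// ByVal, so visitCallSite below can pass %s directly and drop the cast.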

namespace {
class InstCombineFortifiedLibCalls : public SimplifyFortifiedLibCalls {
  InstCombiner *IC;
protected:
  void replaceCall(Value *With) {
    NewInstruction = IC->ReplaceInstUsesWith(*CI, With);
  }
  bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp, bool isString) const {
    if (ConstantInt *SizeCI =
          dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp))) {
      if (SizeCI->isAllOnesValue())
        return true;
      if (isString)
        return SizeCI->getZExtValue() >=
               GetStringLength(CI->getArgOperand(SizeArgOp));
      if (ConstantInt *Arg = dyn_cast<ConstantInt>(
            CI->getArgOperand(SizeArgOp)))
        return SizeCI->getZExtValue() >= Arg->getZExtValue();
    }
    return false;
  }
public:
  InstCombineFortifiedLibCalls(InstCombiner *IC) : IC(IC), NewInstruction(0) { }
  Instruction *NewInstruction;
};
} // end anonymous namespace

// Try to fold certain library calls here. Currently we only handle the
// checking functions: memcpy_chk, mempcpy_chk, memmove_chk, memset_chk,
// strcpy_chk, stpcpy_chk, strncpy_chk, strcat_chk and strncat_chk.
Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const TargetData *TD) {
  if (CI->getCalledFunction() == 0) return 0;

  InstCombineFortifiedLibCalls Simplifier(this);
  Simplifier.fold(CI, TD);
  return Simplifier.NewInstruction;
}
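
// Illustrative example (hypothetical IR): for a fortified call whose object
// size is a known-sufficient constant, e.g.
//   %r = call i8* @__memcpy_chk(i8* %d, i8* %s, i64 8, i64 64)
// isFoldable sees 64 >= 8 and SimplifyFortifiedLibCalls rewrites the call to
// the plain memcpy equivalent; an object size of -1 (unknown but unchecked)
// always folds.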

// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
  bool Changed = false;

  // If the callee is a constexpr cast of a function, attempt to move the cast
  // to the arguments of the call/invoke.
  if (transformConstExprCastCall(CS)) return 0;

  Value *Callee = CS.getCalledValue();

  if (Function *CalleeF = dyn_cast<Function>(Callee))
    // If the call and callee calling conventions don't match, this call must
    // be unreachable, as the call is undefined.
    if (CalleeF->getCallingConv() != CS.getCallingConv() &&
        // Only do this for calls to a function with a body. A prototype may
        // not actually end up matching the implementation's calling conv for a
        // variety of reasons (e.g. it may be written in assembly).
        !CalleeF->isDeclaration()) {
      Instruction *OldCall = CS.getInstruction();
      new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                    UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                    OldCall);
      // If OldCall does not return void then replaceAllUsesWith undef.
      // This allows ValueHandlers and custom metadata to adjust themselves.
      if (!OldCall->getType()->isVoidTy())
        OldCall->replaceAllUsesWith(UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))
        return EraseInstFromFunction(*OldCall);

      // We cannot remove an invoke, because it would change the CFG, just
      // change the callee to a null pointer.
      cast<InvokeInst>(OldCall)->setCalledFunction(
                                   Constant::getNullValue(CalleeF->getType()));
      return 0;
    }

  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
    // This instruction is not reachable, just remove it. We insert a store to
    // undef so that we know that this code is not reachable, despite the fact
    // that we can't modify the CFG here.
    new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                  UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                  CS.getInstruction());

    // If CS does not return void then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust themselves.
    if (!CS.getInstruction()->getType()->isVoidTy())
      CS.getInstruction()->
        replaceAllUsesWith(UndefValue::get(CS.getInstruction()->getType()));

    if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      // Don't break the CFG, insert a dummy cond branch.
      BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
                         ConstantInt::getTrue(Callee->getContext()), II);
    }
    return EraseInstFromFunction(*CS.getInstruction());
  }

  if (BitCastInst *BC = dyn_cast<BitCastInst>(Callee))
    if (IntrinsicInst *In = dyn_cast<IntrinsicInst>(BC->getOperand(0)))
      if (In->getIntrinsicID() == Intrinsic::init_trampoline)
        return transformCallThroughTrampoline(CS);

  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams() + (isa<InvokeInst>(Callee) ? 3 : 1);
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
    for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
           E = CS.arg_end(); I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }

  // Try to optimize the call if possible; we require TargetData for most of
  // this. None of these calls are seen as possibly dead so go ahead and
  // delete the instruction now.
  if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
    Instruction *I = tryOptimizeCall(CI, TD);
    // If we changed something, return the result; otherwise fall through to
    // the check below.
    if (I) return EraseInstFromFunction(*I);
  }

  return Changed ? CS.getInstruction() : 0;
}
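
// Illustrative example (hypothetical IR): a call through a null or undef
// callee, e.g.
//   call void undef()
// is provably unreachable, so visitCallSite plants
//   store i1 true, i1* undef
// as an unreachability marker (the CFG cannot be edited from InstCombine)
// and erases the call, replacing any uses of its result with undef.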

// transformConstExprCastCall - If the callee is a constexpr cast of a function,
// attempt to move the cast to the arguments of the call/invoke.
//
bool InstCombiner::transformConstExprCastCall(CallSite CS) {
  if (!isa<ConstantExpr>(CS.getCalledValue())) return false;
  ConstantExpr *CE = cast<ConstantExpr>(CS.getCalledValue());
  if (CE->getOpcode() != Instruction::BitCast ||
      !isa<Function>(CE->getOperand(0)))
    return false;
  Function *Callee = cast<Function>(CE->getOperand(0));
  Instruction *Caller = CS.getInstruction();
  const AttrListPtr &CallerPAL = CS.getAttributes();

  // Okay, this is a cast from a function to a different type. Unless doing so
  // would cause a type conversion of one of our arguments, change this call to
  // be a direct call with arguments cast to the appropriate types.
  //
  const FunctionType *FT = Callee->getFunctionType();
  const Type *OldRetTy = Caller->getType();
  const Type *NewRetTy = FT->getReturnType();

  if (NewRetTy->isStructTy())
    return false;  // TODO: Handle multiple return values.

  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {
    if (Callee->isDeclaration() &&
        // Conversion is ok if changing from one pointer type to another or
        // from a pointer to an integer of the same size.
        !((OldRetTy->isPointerTy() || !TD ||
           OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
          (NewRetTy->isPointerTy() || !TD ||
           NewRetTy == TD->getIntPtrType(Caller->getContext()))))
      return false;  // Cannot transform this return value.

    if (!Caller->use_empty() &&
        // void -> non-void is handled specially
        !NewRetTy->isVoidTy() && !CastInst::isCastable(NewRetTy, OldRetTy))
      return false;  // Cannot transform this return value.

    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
      Attributes RAttrs = CallerPAL.getRetAttributes();
      if (RAttrs & Attribute::typeIncompatible(NewRetTy))
        return false;  // Attribute not compatible with transformed value.
    }

    // If the callsite is an invoke instruction, and the return value is used
    // by a PHI node in a successor, we cannot change the return type of the
    // call because there is no place to put the cast instruction (without
    // breaking the critical edge). Bail out in this case.
    if (!Caller->use_empty())
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
        for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
             UI != E; ++UI)
          if (PHINode *PN = dyn_cast<PHINode>(*UI))
            if (PN->getParent() == II->getNormalDest() ||
                PN->getParent() == II->getUnwindDest())
              return false;
  }

  unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  CallSite::arg_iterator AI = CS.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    const Type *ActTy = (*AI)->getType();

    if (!CastInst::isCastable(ActTy, ParamTy))
      return false;  // Cannot transform this parameter value.

    if (CallerPAL.getParamAttributes(i + 1)
        & Attribute::typeIncompatible(ParamTy))
      return false;  // Attribute not compatible with transformed value.

    // Converting from one pointer type to another or between a pointer and an
    // integer of the same size is safe even if we do not have a body.
    bool isConvertible = ActTy == ParamTy ||
      (TD && ((ParamTy->isPointerTy() ||
               ParamTy == TD->getIntPtrType(Caller->getContext())) &&
              (ActTy->isPointerTy() ||
               ActTy == TD->getIntPtrType(Caller->getContext()))));
    if (Callee->isDeclaration() && !isConvertible) return false;
  }

  if (FT->getNumParams() < NumActualArgs && !FT->isVarArg() &&
      Callee->isDeclaration())
    return false;  // Do not delete arguments unless we have a function body.

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty())
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them. Check that these extra arguments have attributes
    // that are compatible with being a vararg call argument.
    for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
      if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
        break;
      Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
      if (PAttrs & Attribute::VarArgsIncompatible)
        return false;
    }

  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary...
  std::vector<Value*> Args;
  Args.reserve(NumActualArgs);
  SmallVector<AttributeWithIndex, 8> attrVec;
  attrVec.reserve(NumCommonArgs);

  // Get any return attributes.
  Attributes RAttrs = CallerPAL.getRetAttributes();

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes. Wipe out any problematic attributes.
  RAttrs &= ~Attribute::typeIncompatible(NewRetTy);

  // Add the new return attributes.
  if (RAttrs)
    attrVec.push_back(AttributeWithIndex::get(0, RAttrs));

  AI = CS.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    if ((*AI)->getType() == ParamTy) {
      Args.push_back(*AI);
    } else {
      Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
          false, ParamTy, false);
      Args.push_back(Builder->CreateCast(opcode, *AI, ParamTy, "tmp"));
    }

    // Add any parameter attributes.
    if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
      attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
  }

  // If the function takes more arguments than the call was taking, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));

  // If we are removing arguments to the function, emit an obnoxious warning.
  if (FT->getNumParams() < NumActualArgs) {
    if (!FT->isVarArg()) {
      errs() << "WARNING: While resolving call to function '"
             << Callee->getName() << "' arguments were dropped!\n";
    } else {
      // Add all of the arguments in their promoted form to the arg list.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        const Type *PTy = getPromotedType((*AI)->getType());
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through the va_arg area!
          Instruction::CastOps opcode =
            CastInst::getCastOpcode(*AI, false, PTy, false);
          Args.push_back(Builder->CreateCast(opcode, *AI, PTy, "tmp"));
        } else {
          Args.push_back(*AI);
        }

        // Add any parameter attributes.
        if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
          attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
      }
    }
  }

  if (Attributes FnAttrs = CallerPAL.getFnAttributes())
    attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));

  if (NewRetTy->isVoidTy())
    Caller->setName("");  // Void type should not have a name.

  const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),
                                                     attrVec.end());

  Instruction *NC;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NC = InvokeInst::Create(Callee, II->getNormalDest(), II->getUnwindDest(),
                            Args.begin(), Args.end(),
                            Caller->getName(), Caller);
    cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
    cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
  } else {
    NC = CallInst::Create(Callee, Args.begin(), Args.end(),
                          Caller->getName(), Caller);
    CallInst *CI = cast<CallInst>(Caller);
    if (CI->isTailCall())
      cast<CallInst>(NC)->setTailCall();
    cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
    cast<CallInst>(NC)->setAttributes(NewCallerPAL);
  }

  // Insert a cast of the return type as necessary.
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      Instruction::CastOps opcode = CastInst::getCastOpcode(NC, false,
                                                            OldRetTy, false);
      NV = NC = CastInst::Create(opcode, NC, OldRetTy, "tmp");

      // If this is an invoke instruction, we should insert it after the first
      // non-phi instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstNonPHI();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call; just insert the cast right after the call
        // instruction.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.AddUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }


  if (!Caller->use_empty())
    Caller->replaceAllUsesWith(NV);

  EraseInstFromFunction(*Caller);
  return true;
}
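
// Illustrative example (hypothetical IR, assuming @f is defined in this
// module so the argument conversion is allowed): a call through a bitcast
// callee such as
//   %r = call i32 bitcast (i32 (i32)* @f to i32 (i8)*)(i8 %x)
// is turned into a direct call by casting the argument instead:
//   %t = zext i8 %x to i32    ; opcode chosen by CastInst::getCastOpcode
//   %r = call i32 @f(i32 %t)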

// transformCallThroughTrampoline - Turn a call to a function created by the
// init_trampoline intrinsic into a direct call to the underlying function.
//
Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
  Value *Callee = CS.getCalledValue();
  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  const AttrListPtr &Attrs = CS.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return 0;

  IntrinsicInst *Tramp =
    cast<IntrinsicInst>(cast<BitCastInst>(Callee)->getOperand(0));

  Function *NestF =
    cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
  const PointerType *NestFPTy = cast<PointerType>(NestF->getType());
  const FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());

  const AttrListPtr &NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestIdx = 1;
    const Type *NestTy = 0;
    Attributes NestAttr = Attribute::None;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
           E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
      if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = NestAttrs.getParamAttributes(NestIdx);
        break;
      }

    if (NestTy) {
      Instruction *Caller = CS.getInstruction();
      std::vector<Value*> NewArgs;
      NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);

      SmallVector<AttributeWithIndex, 8> NewAttrs;
      NewAttrs.reserve(Attrs.getNumSlots() + 1);

      // Insert the nest argument into the call argument list, which may
      // mean appending it. Likewise for attributes.

      // Add any result attributes.
      if (Attributes Attr = Attrs.getRetAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(0, Attr));

      {
        unsigned Idx = 1;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        do {
          if (Idx == NestIdx) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp->getArgOperand(2);
            if (NestVal->getType() != NestTy)
              NestVal = new BitCastInst(NestVal, NestTy, "nest", Caller);
            NewArgs.push_back(NestVal);
            NewAttrs.push_back(AttributeWithIndex::get(NestIdx, NestAttr));
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          if (Attributes Attr = Attrs.getParamAttributes(Idx))
            NewAttrs.push_back
              (AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr));

          ++Idx, ++I;
        } while (1);
      }

      // Add any function attributes.
      if (Attributes Attr = Attrs.getFnAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(~0, Attr));

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<const Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned Idx = 1;
        FunctionType::param_iterator I = FTy->param_begin(),
          E = FTy->param_end();

        do {
          if (Idx == NestIdx)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++Idx, ++I;
        } while (1);
      }

      // Replace the trampoline call with a direct call. Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
        NestF->getType() == PointerType::getUnqual(NewFTy) ?
        NestF : ConstantExpr::getBitCast(NestF,
                                         PointerType::getUnqual(NewFTy));
      const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),
                                                   NewAttrs.end());

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        NewCaller = InvokeInst::Create(NewCallee,
                                       II->getNormalDest(),
                                       II->getUnwindDest(),
                                       NewArgs.begin(), NewArgs.end(),
                                       Caller->getName(), Caller);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewCallee, NewArgs.begin(), NewArgs.end(),
                                     Caller->getName(), Caller);
        if (cast<CallInst>(Caller)->isTailCall())
          cast<CallInst>(NewCaller)->setTailCall();
        cast<CallInst>(NewCaller)->
          setCallingConv(cast<CallInst>(Caller)->getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }
      if (!Caller->getType()->isVoidTy())
        Caller->replaceAllUsesWith(NewCaller);
      Caller->eraseFromParent();
      Worklist.Remove(Caller);
      return 0;
    }
  }

  // Replace the trampoline call with a direct call. Since there is no 'nest'
  // parameter, there is no need to adjust the argument list. Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee =
    NestF->getType() == PTy ? NestF :
    ConstantExpr::getBitCast(NestF, PTy);
  CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}