InstCombineCalls.cpp revision e754d3fb852abdeaf910c7331eed60f6303597c1
//===- InstCombineCalls.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall and visitInvoke functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
using namespace llvm;

/// getPromotedType - Return the specified type promoted as it would be to pass
/// through a va_arg area.
static const Type *getPromotedType(const Type *Ty) {
  if (const IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}

/// EnforceKnownAlignment - If the specified pointer points to an object that
/// we control, modify the object's alignment to PrefAlign. This isn't
/// often possible though. If alignment is important, a more reliable approach
/// is to simply align all global variables and allocation instructions to
/// their preferred alignment from the beginning.
///
static unsigned EnforceKnownAlignment(Value *V,
                                      unsigned Align, unsigned PrefAlign) {

  User *U = dyn_cast<User>(V);
  if (!U) return Align;

  switch (Operator::getOpcode(U)) {
  default: break;
  case Instruction::BitCast:
    return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
  case Instruction::GetElementPtr: {
    // If all indexes are zero, it is just the alignment of the base pointer.
    bool AllZeroOperands = true;
    for (User::op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e; ++i)
      if (!isa<Constant>(*i) ||
          !cast<Constant>(*i)->isNullValue()) {
        AllZeroOperands = false;
        break;
      }

    if (AllZeroOperands) {
      // Treat this like a bitcast.
      return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
    }
    break;
  }
  }

  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // If there is a large requested alignment and we can, bump up the
    // alignment of the global.
    if (!GV->isDeclaration()) {
      if (GV->getAlignment() >= PrefAlign)
        Align = GV->getAlignment();
      else {
        GV->setAlignment(PrefAlign);
        Align = PrefAlign;
      }
    }
  } else if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    // If there is a requested alignment and if this is an alloca, round up.
    if (AI->getAlignment() >= PrefAlign)
      Align = AI->getAlignment();
    else {
      AI->setAlignment(PrefAlign);
      Align = PrefAlign;
    }
  }

  return Align;
}
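
// Illustrative example (hypothetical IR): EnforceKnownAlignment can bump the
// alignment of a local object we control.  Given
//   %buf = alloca [16 x i8], align 4
// a request for PrefAlign = 16 rewrites the alloca in place to
//   %buf = alloca [16 x i8], align 16
// and returns 16, so callers may rely on the larger alignment.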
/// GetOrEnforceKnownAlignment - If the specified pointer has an alignment that
/// we can determine, return it, otherwise return 0.  If PrefAlign is
/// specified, and it is more than the alignment of the ultimate object, see
/// if we can increase the alignment of the ultimate object, making this check
/// succeed.
unsigned InstCombiner::GetOrEnforceKnownAlignment(Value *V,
                                                  unsigned PrefAlign) {
  unsigned BitWidth = TD ? TD->getTypeSizeInBits(V->getType()) :
                           sizeof(PrefAlign) * CHAR_BIT;
  APInt Mask = APInt::getAllOnesValue(BitWidth);
  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  ComputeMaskedBits(V, Mask, KnownZero, KnownOne);
  unsigned TrailZ = KnownZero.countTrailingOnes();
  unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);

  if (PrefAlign > Align)
    Align = EnforceKnownAlignment(V, Align, PrefAlign);

  // We don't need to make any adjustment.
  return Align;
}

Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
  unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getOperand(1));
  unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getOperand(2));
  unsigned MinAlign = std::min(DstAlign, SrcAlign);
  unsigned CopyAlign = MI->getAlignment();

  if (CopyAlign < MinAlign) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      MinAlign, false));
    return MI;
  }

  // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
  // load/store.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getOperand(3));
  if (MemOpLength == 0) return 0;

  // Source and destination pointer types are always "i8*" for intrinsic.  See
  // if the size is something we can handle with a single primitive load/store.
  // A single load+store correctly handles overlapping memory in the memmove
  // case.
  unsigned Size = MemOpLength->getZExtValue();
  if (Size == 0) return MI;  // Delete this mem transfer.

  if (Size > 8 || (Size&(Size-1)))
    return 0;  // If not 1/2/4/8 bytes, exit.

  // Use an integer load+store unless we can find something better.
  Type *NewPtrTy =
    PointerType::getUnqual(IntegerType::get(MI->getContext(), Size<<3));

  // Memcpy forces the use of i8* for the source and destination.  That means
  // that if you're using memcpy to move one double around, you'll get a cast
  // from double* to i8*.  We'd much rather use a double load+store than an
  // i64 load+store here, because this improves the odds that the source or
  // dest address will be promotable.  See if we can find a better type than
  // the integer datatype.
  Value *StrippedDest = MI->getOperand(1)->stripPointerCasts();
  if (StrippedDest != MI->getOperand(1)) {
    const Type *SrcETy = cast<PointerType>(StrippedDest->getType())
                           ->getElementType();
    if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
      // The SrcETy might be something like {{{double}}} or [1 x double].  Rip
      // down through these levels if so.
      while (!SrcETy->isSingleValueType()) {
        if (const StructType *STy = dyn_cast<StructType>(SrcETy)) {
          if (STy->getNumElements() == 1)
            SrcETy = STy->getElementType(0);
          else
            break;
        } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
          if (ATy->getNumElements() == 1)
            SrcETy = ATy->getElementType();
          else
            break;
        } else
          break;
      }

      if (SrcETy->isSingleValueType())
        NewPtrTy = PointerType::getUnqual(SrcETy);
    }
  }

  // If the memcpy/memmove provides better alignment info than we can
  // infer, use it.
  SrcAlign = std::max(SrcAlign, CopyAlign);
  DstAlign = std::max(DstAlign, CopyAlign);

  Value *Src = Builder->CreateBitCast(MI->getOperand(2), NewPtrTy);
  Value *Dest = Builder->CreateBitCast(MI->getOperand(1), NewPtrTy);
  Instruction *L = new LoadInst(Src, "tmp", false, SrcAlign);
  InsertNewInstBefore(L, *MI);
  InsertNewInstBefore(new StoreInst(L, Dest, false, DstAlign), *MI);

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setOperand(3, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}
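
// Illustrative example (hypothetical IR, simplified intrinsic signature): an
// 8-byte fixed-length memcpy is rewritten by SimplifyMemTransfer into a
// single load/store pair through i64* (or double*, if the stripped pointee
// suggests it):
//   call void @llvm.memcpy(i8* %dst, i8* %src, i32 8, i32 8)
// becomes
//   %s = bitcast i8* %src to i64*
//   %d = bitcast i8* %dst to i64*
//   %v = load i64* %s, align 8
//   store i64 %v, i64* %d, align 8
// and the memcpy's length operand is then zeroed so it dies on the next
// iteration.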
Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
  unsigned Alignment = GetOrEnforceKnownAlignment(MI->getDest());
  if (MI->getAlignment() < Alignment) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      Alignment, false));
    return MI;
  }

  // Extract the length and fill value if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return 0;
  uint64_t Len = LenC->getZExtValue();
  Alignment = MI->getAlignment();

  // If the length is zero, this is a no-op.
  if (Len == 0) return MI;  // memset(d,c,0,a) -> noop

  // memset(s,c,n) -> store s, c  (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    const Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    Dest = Builder->CreateBitCast(Dest, PointerType::getUnqual(ITy));

    // Alignment 0 is identity for alignment 1 for memset, but not store.
    if (Alignment == 0) Alignment = 1;

    // Extract the fill value and store.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    InsertNewInstBefore(new StoreInst(ConstantInt::get(ITy, Fill),
                                      Dest, false, Alignment), *MI);

    // Set the length of the memset to 0; it will be deleted on the next
    // iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return 0;
}
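
// Illustrative example (hypothetical IR, simplified intrinsic signature): the
// power-of-two memset fold above splats the fill byte across one wide store:
//   call void @llvm.memset(i8* %p, i8 42, i32 4, i32 4)
// becomes (42 = 0x2A, splatted to 0x2A2A2A2A = 707406378)
//   %q = bitcast i8* %p to i32*
//   store i32 707406378, i32* %q, align 4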
/// visitCallInst - CallInst simplification.  This mostly only handles folding
/// of intrinsic instructions.  For normal calls, it allows visitCallSite to do
/// the heavy lifting.
///
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
  if (isFreeCall(&CI))
    return visitFree(CI);

  // If the caller function is nounwind, mark the call as nounwind, even if the
  // callee isn't.
  if (CI.getParent()->getParent()->doesNotThrow() &&
      !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallSite(&CI);

  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue()) return EraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations.  We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // If we have a memmove and the source operand is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getParent()->getParent()->getParent();
          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
          const Type *Tys[1];
          Tys[0] = CI.getOperand(3)->getType();
          CI.setOperand(0,
                        Intrinsic::getDeclaration(M, MemCpyID, Tys, 1));
          Changed = true;
        }
    }

    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return EraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (isa<MemTransferInst>(MI)) {
      if (Instruction *I = SimplifyMemTransfer(MI))
        return I;
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
      if (Instruction *I = SimplifyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }
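
  // Illustrative example (hypothetical IR, simplified intrinsic signature):
  // because a constant global cannot alias the destination, the memmove case
  // above is retargeted to memcpy in place; only the callee operand changes:
  //   @str = constant [5 x i8] c"abcd\00"
  //   call void @llvm.memmove(i8* %d, i8* %s, i32 5, i32 1) ; %s points into @str
  // becomes the same call with @llvm.memcpy as the callee.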

  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::objectsize: {
    // We need target data for just about everything, so depend on it.
    if (!TD) break;

    const Type *ReturnTy = CI.getType();
    bool Min = (cast<ConstantInt>(II->getOperand(2))->getZExtValue() == 1);

    // Get to the real allocated thing and offset as fast as possible.
    Value *Op1 = II->getOperand(1)->stripPointerCasts();

    // If we've stripped down to a single global variable that we
    // can know the size of, then just return that.
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1)) {
      if (GV->hasDefinitiveInitializer()) {
        Constant *C = GV->getInitializer();
        uint64_t GlobalSize = TD->getTypeAllocSize(C->getType());
        return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, GlobalSize));
      } else {
        // Can't determine size of the GV.
        Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
        return ReplaceInstUsesWith(CI, RetVal);
      }
    } else if (AllocaInst *AI = dyn_cast<AllocaInst>(Op1)) {
      // Get alloca size.
      if (AI->getAllocatedType()->isSized()) {
        uint64_t AllocaSize = TD->getTypeAllocSize(AI->getAllocatedType());
        if (AI->isArrayAllocation()) {
          const ConstantInt *C = dyn_cast<ConstantInt>(AI->getArraySize());
          if (!C) break;
          AllocaSize *= C->getZExtValue();
        }
        return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, AllocaSize));
      }
    } else if (CallInst *MI = extractMallocCall(Op1)) {
      const Type* MallocType = getMallocAllocatedType(MI);
      // Get malloc size.
      if (MallocType && MallocType->isSized()) {
        if (Value *NElems = getMallocArraySize(MI, TD, true)) {
          if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
            return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy,
                (NElements->getZExtValue() * TD->getTypeAllocSize(MallocType))));
        }
      }
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op1)) {
      // Only handle constant GEPs here.
      if (CE->getOpcode() != Instruction::GetElementPtr) break;
      GEPOperator *GEP = cast<GEPOperator>(CE);

      // Make sure we're not a constant offset from an external
      // global.
      Value *Operand = GEP->getPointerOperand();
      Operand = Operand->stripPointerCasts();
      if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Operand))
        if (!GV->hasDefinitiveInitializer()) break;

      // Get what we're pointing to and its size.
      const PointerType *BaseType =
        cast<PointerType>(Operand->getType());
      uint64_t Size = TD->getTypeAllocSize(BaseType->getElementType());

      // Get the current byte offset into the thing.  Use the original
      // operand in case we're looking through a bitcast.
      SmallVector<Value*, 8> Ops(CE->op_begin()+1, CE->op_end());
      const PointerType *OffsetType =
        cast<PointerType>(GEP->getPointerOperand()->getType());
      uint64_t Offset = TD->getIndexedOffset(OffsetType, &Ops[0], Ops.size());

      if (Size < Offset) {
        // Out of bound reference? Negative index normalized to large
        // index? Just return "I don't know".
        Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
        return ReplaceInstUsesWith(CI, RetVal);
      }

      Constant *RetVal = ConstantInt::get(ReturnTy, Size-Offset);
      return ReplaceInstUsesWith(CI, RetVal);
    }

    // Do not return "I don't know" here.  Later optimization passes could
    // make it possible to evaluate objectsize to a constant.
    break;
  }
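
  // Illustrative example (hypothetical IR): with TargetData available, the
  // objectsize fold above evaluates constant-GEP cases directly:
  //   @g = global [10 x i8] zeroinitializer
  //   %n = call i32 @llvm.objectsize.i32(i8* getelementptr ([10 x i8]* @g,
  //                                      i32 0, i32 3), i1 false)
  // folds to i32 7 (Size - Offset = 10 - 3); an unknown object folds to -1,
  // or to 0 when the second argument asks for the minimum.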
  case Intrinsic::bswap:
    // bswap(bswap(x)) -> x
    if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getOperand(1)))
      if (Operand->getIntrinsicID() == Intrinsic::bswap)
        return ReplaceInstUsesWith(CI, Operand->getOperand(1));

    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
    if (TruncInst *TI = dyn_cast<TruncInst>(II->getOperand(1))) {
      if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(TI->getOperand(0)))
        if (Operand->getIntrinsicID() == Intrinsic::bswap) {
          unsigned C = Operand->getType()->getPrimitiveSizeInBits() -
                       TI->getType()->getPrimitiveSizeInBits();
          Value *CV = ConstantInt::get(Operand->getType(), C);
          Value *V = Builder->CreateLShr(Operand->getOperand(1), CV);
          return new TruncInst(V, TI->getType());
        }
    }

    break;
  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getOperand(2))) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
      // powi(x, 1) -> x
      if (Power->isOne())
        return ReplaceInstUsesWith(CI, II->getOperand(1));
      // powi(x, -1) -> 1/x
      if (Power->isAllOnesValue())
        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
                                          II->getOperand(1));
    }
    break;
  case Intrinsic::cttz: {
    // If all bits below the first known one are known zero,
    // this value is constant.
    const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getOperand(1), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned TrailingZeros = KnownOne.countTrailingZeros();
    APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, TrailingZeros)));
  }
  break;
  case Intrinsic::ctlz: {
    // If all bits above the first known one are known zero,
    // this value is constant.
    const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getOperand(1), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned LeadingZeros = KnownOne.countLeadingZeros();
    APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, LeadingZeros)));
  }
  break;
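
  // Illustrative example (hypothetical values): for the cttz/ctlz folds
  // above, if %x is known to match 0b...?1000 (bit 3 known one, bits 0-2
  // known zero), then all bits below the first known one are known zero, so
  // cttz(%x) folds to the constant 3 no matter what the unknown high bits
  // turn out to be.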
  case Intrinsic::uadd_with_overflow: {
    Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
    const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt Mask = APInt::getSignBit(BitWidth);
    APInt LHSKnownZero(BitWidth, 0);
    APInt LHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
    bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
    bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];

    if (LHSKnownNegative || LHSKnownPositive) {
      APInt RHSKnownZero(BitWidth, 0);
      APInt RHSKnownOne(BitWidth, 0);
      ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
      bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
      bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
      if (LHSKnownNegative && RHSKnownNegative) {
        // The sign bit is set in both cases: this MUST overflow.
        // Create a simple add instruction, and insert it into the struct.
        Instruction *Add = BinaryOperator::CreateAdd(LHS, RHS, "", &CI);
        Worklist.Add(Add);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getTrue(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, Add, 0);
      }

      if (LHSKnownPositive && RHSKnownPositive) {
        // The sign bit is clear in both cases: this CANNOT overflow.
        // Create a simple add instruction, and insert it into the struct.
        Instruction *Add = BinaryOperator::CreateNUWAdd(LHS, RHS, "", &CI);
        Worklist.Add(Add);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, Add, 0);
      }
    }
  }
  // FALL THROUGH uadd into sadd
  case Intrinsic::sadd_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getOperand(1)) &&
        !isa<Constant>(II->getOperand(2))) {
      Value *LHS = II->getOperand(1);
      II->setOperand(1, II->getOperand(2));
      II->setOperand(2, LHS);
      return II;
    }

    // X + undef -> undef
    if (isa<UndefValue>(II->getOperand(2)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getOperand(2))) {
      // X + 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getOperand(1)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getOperand(1), 0);
      }
    }
    break;
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // undef - X -> undef
    // X - undef -> undef
    if (isa<UndefValue>(II->getOperand(1)) ||
        isa<UndefValue>(II->getOperand(2)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getOperand(2))) {
      // X - 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getOperand(1)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getOperand(1), 0);
      }
    }
    break;
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getOperand(1)) &&
        !isa<Constant>(II->getOperand(2))) {
      Value *LHS = II->getOperand(1);
      II->setOperand(1, II->getOperand(2));
      II->setOperand(2, LHS);
      return II;
    }

    // X * undef -> undef
    if (isa<UndefValue>(II->getOperand(2)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getOperand(2))) {
      // X*0 -> {0, false}
      if (RHSI->isZero())
        return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));

      // X * 1 -> {X, false}
      if (RHSI->equalsInt(1)) {
        Constant *V[] = {
          UndefValue::get(II->getOperand(1)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getOperand(1), 0);
      }
    }
    break;
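
  // Illustrative example (hypothetical IR): in the uadd.with.overflow fold
  // above, if the sign bits of both %a and %b are known zero, then
  //   %s = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  // cannot wrap, so it becomes an `add nuw i32 %a, %b` inserted into the
  // struct {undef, false} at index 0; if both sign bits are known one, the
  // addition must wrap and the overflow flag becomes a constant true instead.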
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::x86_sse_loadu_ps:
  case Intrinsic::x86_sse2_loadu_pd:
  case Intrinsic::x86_sse2_loadu_dq:
    // Turn PPC lvx -> load if the pointer is known aligned.
    // Turn X86 loadups -> load if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
      Value *Ptr = Builder->CreateBitCast(II->getOperand(1),
                                          PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getOperand(2), 16) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getOperand(1)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getOperand(2), OpPtrTy);
      return new StoreInst(II->getOperand(1), Ptr);
    }
    break;
  case Intrinsic::x86_sse_storeu_ps:
  case Intrinsic::x86_sse2_storeu_pd:
  case Intrinsic::x86_sse2_storeu_dq:
    // Turn X86 storeu -> store if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getOperand(2)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getOperand(1), OpPtrTy);
      return new StoreInst(II->getOperand(2), Ptr);
    }
    break;

  case Intrinsic::x86_sse_cvttss2si: {
    // This intrinsic only demands the 0th element of its input vector.  If
    // we can simplify the input based on that, do so now.
    unsigned VWidth =
      cast<VectorType>(II->getOperand(1)->getType())->getNumElements();
    APInt DemandedElts(VWidth, 1);
    APInt UndefElts(VWidth, 0);
    if (Value *V = SimplifyDemandedVectorElts(II->getOperand(1), DemandedElts,
                                              UndefElts)) {
      II->setOperand(1, V);
      return II;
    }
    break;
  }

  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getOperand(3))) {
      assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        if (!isa<ConstantInt>(Mask->getOperand(i)) &&
            !isa<UndefValue>(Mask->getOperand(i))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 = Builder->CreateBitCast(II->getOperand(1), Mask->getType());
        Value *Op1 = Builder->CreateBitCast(II->getOperand(2), Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getOperand(i)))
            continue;
          unsigned Idx = cast<ConstantInt>(Mask->getOperand(i))->getZExtValue();
          Idx &= 31;  // Match the hardware behavior.

          if (ExtractedElts[Idx] == 0) {
            ExtractedElts[Idx] =
              Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
                  ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                   Idx&15, false), "tmp");
          }

          // Insert this value into the result vector.
          Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
                       ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                        i, false), "tmp");
        }
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
      }
    }
    break;
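
  // Illustrative note on the vperm fold above: each constant mask byte
  // selects from the 32 concatenated source bytes, so a mask value of 17
  // (17 & 31) extracts byte 1 of the second source vector (indices 0-15
  // address the first source, 16-31 the second), matching the AltiVec
  // hardware behavior.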

  case Intrinsic::stackrestore: {
    // If the save is right next to the restore, remove the restore.  This can
    // happen when variable allocas are DCE'd.
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getOperand(1))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        BasicBlock::iterator BI = SS;
        if (&*++BI == II)
          return EraseInstFromFunction(CI);
      }
    }

    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
    BasicBlock::iterator BI = II;
    TerminatorInst *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      if (isa<AllocaInst>(BI) || isMalloc(BI)) {
        CannotRemove = true;
        break;
      }
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
          // If there is a stackrestore below this one, remove this one.
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return EraseInstFromFunction(CI);
          // Otherwise, ignore the intrinsic.
        } else {
          // If we found a non-intrinsic call, we can't remove the stack
          // restore.
          CannotRemove = true;
          break;
        }
      }
    }

    // If the stack restore is in a return/unwind block and if there are no
    // allocas or calls between the restore and the return, nuke the restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<UnwindInst>(TI)))
      return EraseInstFromFunction(CI);
    break;
  }
  }

  return visitCallSite(II);
}

// InvokeInst simplification
//
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}

/// isSafeToEliminateVarargsCast - If this cast does not affect the value
/// passed through the varargs area, we can eliminate the use of the cast.
static bool isSafeToEliminateVarargsCast(const CallSite CS,
                                         const CastInst * const CI,
                                         const TargetData * const TD,
                                         const int ix) {
  if (!CI->isLosslessCast())
    return false;

  // The size of ByVal arguments is derived from the type, so we
  // can't change to a type with a different size.  If the size were
  // passed explicitly we could avoid this check.
  if (!CS.paramHasAttr(ix, Attribute::ByVal))
    return true;

  const Type* SrcTy =
    cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  const Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
    return false;
  return true;
}

namespace {
class InstCombineFortifiedLibCalls : public SimplifyFortifiedLibCalls {
  InstCombiner *IC;
protected:
  void replaceCall(Value *With) {
    NewInstruction = IC->ReplaceInstUsesWith(*CI, With);
  }
  bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp, bool isString) const {
    if (ConstantInt *SizeCI = dyn_cast<ConstantInt>(CI->getOperand(SizeCIOp))) {
      if (SizeCI->isAllOnesValue())
        return true;
      if (isString)
        return SizeCI->getZExtValue() >=
               GetStringLength(CI->getOperand(SizeArgOp));
      if (ConstantInt *Arg = dyn_cast<ConstantInt>(CI->getOperand(SizeArgOp)))
        return SizeCI->getZExtValue() >= Arg->getZExtValue();
    }
    return false;
  }
public:
  InstCombineFortifiedLibCalls(InstCombiner *IC) : IC(IC), NewInstruction(0) { }
  Instruction *NewInstruction;
};
} // end anonymous namespace
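
// Illustrative example (hypothetical call): isFoldable above allows the
// fortified simplifier to rewrite a checked call whose object size provably
// covers the access, e.g.
//   call i8* @__memcpy_chk(i8* %d, i8* %s, i32 8, i32 16)
// can be folded to a plain memcpy because the known object size (16) is at
// least the copy length (8); an object size of -1 (all ones) means the check
// was already disabled, so folding is always allowed.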

// Try to fold some different types of calls here.
// Currently we're only working with the checking functions, memcpy_chk,
// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
// strcat_chk and strncat_chk.
Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const TargetData *TD) {
  if (CI->getCalledFunction() == 0) return 0;

  InstCombineFortifiedLibCalls Simplifier(this);
  Simplifier.fold(CI, TD);
  return Simplifier.NewInstruction;
}

// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
  bool Changed = false;

  // If the callee is a constexpr cast of a function, attempt to move the cast
  // to the arguments of the call/invoke.
  if (transformConstExprCastCall(CS)) return 0;

  Value *Callee = CS.getCalledValue();

  if (Function *CalleeF = dyn_cast<Function>(Callee))
    // If the call and callee calling conventions don't match, this call must
    // be unreachable, as the call is undefined.
    if (CalleeF->getCallingConv() != CS.getCallingConv() &&
        // Only do this for calls to a function with a body.  A prototype may
        // not actually end up matching the implementation's calling conv for a
        // variety of reasons (e.g. it may be written in assembly).
        !CalleeF->isDeclaration()) {
      Instruction *OldCall = CS.getInstruction();
      new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                    UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                    OldCall);
      // If OldCall does not return void then replaceAllUsesWith undef.
      // This allows ValueHandlers and custom metadata to adjust themselves.
      if (!OldCall->getType()->isVoidTy())
        OldCall->replaceAllUsesWith(UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))
        return EraseInstFromFunction(*OldCall);

      // We cannot remove an invoke, because it would change the CFG, just
      // change the callee to a null pointer.
      cast<InvokeInst>(OldCall)->setCalledFunction(
                                   Constant::getNullValue(CalleeF->getType()));
      return 0;
    }

  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
    // This instruction is not reachable, just remove it.  We insert a store to
    // undef so that we know that this code is not reachable, despite the fact
    // that we can't modify the CFG here.
    new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                  UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                  CS.getInstruction());

    // If CS does not return void then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust themselves.
    if (!CS.getInstruction()->getType()->isVoidTy())
      CS.getInstruction()->
        replaceAllUsesWith(UndefValue::get(CS.getInstruction()->getType()));

    if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      // Don't break the CFG, insert a dummy cond branch.
      BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
                         ConstantInt::getTrue(Callee->getContext()), II);
    }
    return EraseInstFromFunction(*CS.getInstruction());
  }

  if (BitCastInst *BC = dyn_cast<BitCastInst>(Callee))
    if (IntrinsicInst *In = dyn_cast<IntrinsicInst>(BC->getOperand(0)))
      if (In->getIntrinsicID() == Intrinsic::init_trampoline)
        return transformCallThroughTrampoline(CS);

  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams() + (isa<InvokeInst>(Callee) ? 3 : 1);
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
    for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
           E = CS.arg_end(); I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }

  // Try to optimize the call if possible, we require TargetData for most of
  // this.  None of these calls are seen as possibly dead so go ahead and
  // delete the instruction now.
  if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
    Instruction *I = tryOptimizeCall(CI, TD);
    // If we changed something, return the result; otherwise fall through to
    // the generic return below.
    if (I) return EraseInstFromFunction(*I);
  }

  return Changed ? CS.getInstruction() : 0;
}
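
// Illustrative example (hypothetical IR): the varargs loop above strips
// lossless casts of arguments in the variadic portion of a call, so
//   %q = bitcast i32* %p to i8*
//   call i32 (i8*, ...)* @printf(i8* %fmt, i8* %q)
// is rewritten to pass i32* %p directly; byval arguments are only rewritten
// when the source and destination types have the same allocation size.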

// transformConstExprCastCall - If the callee is a constexpr cast of a function,
// attempt to move the cast to the arguments of the call/invoke.
//
bool InstCombiner::transformConstExprCastCall(CallSite CS) {
  if (!isa<ConstantExpr>(CS.getCalledValue())) return false;
  ConstantExpr *CE = cast<ConstantExpr>(CS.getCalledValue());
  if (CE->getOpcode() != Instruction::BitCast ||
      !isa<Function>(CE->getOperand(0)))
    return false;
  Function *Callee = cast<Function>(CE->getOperand(0));
  Instruction *Caller = CS.getInstruction();
  const AttrListPtr &CallerPAL = CS.getAttributes();

  // Okay, this is a cast from a function to a different type.  Unless doing so
  // would cause a type conversion of one of our arguments, change this call to
  // be a direct call with arguments cast to the appropriate types.
  //
  const FunctionType *FT = Callee->getFunctionType();
  const Type *OldRetTy = Caller->getType();
  const Type *NewRetTy = FT->getReturnType();

  if (NewRetTy->isStructTy())
    return false; // TODO: Handle multiple return values.

  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {
    if (Callee->isDeclaration() &&
        // Conversion is ok if changing from one pointer type to another or
        // from a pointer to an integer of the same size.
        !((OldRetTy->isPointerTy() || !TD ||
           OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
          (NewRetTy->isPointerTy() || !TD ||
           NewRetTy == TD->getIntPtrType(Caller->getContext()))))
      return false;   // Cannot transform this return value.

    if (!Caller->use_empty() &&
        // void -> non-void is handled specially
        !NewRetTy->isVoidTy() && !CastInst::isCastable(NewRetTy, OldRetTy))
      return false;   // Cannot transform this return value.

    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
      Attributes RAttrs = CallerPAL.getRetAttributes();
      if (RAttrs & Attribute::typeIncompatible(NewRetTy))
        return false;   // Attribute not compatible with transformed value.
    }

    // If the callsite is an invoke instruction, and the return value is used
    // by a PHI node in a successor, we cannot change the return type of the
    // call because there is no place to put the cast instruction (without
    // breaking the critical edge).  Bail out in this case.
    if (!Caller->use_empty())
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
        for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
             UI != E; ++UI)
          if (PHINode *PN = dyn_cast<PHINode>(*UI))
            if (PN->getParent() == II->getNormalDest() ||
                PN->getParent() == II->getUnwindDest())
              return false;
  }

  unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  CallSite::arg_iterator AI = CS.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    const Type *ActTy = (*AI)->getType();

    if (!CastInst::isCastable(ActTy, ParamTy))
      return false;   // Cannot transform this parameter value.

    if (CallerPAL.getParamAttributes(i + 1)
        & Attribute::typeIncompatible(ParamTy))
      return false;   // Attribute not compatible with transformed value.

    // Converting from one pointer type to another or between a pointer and an
    // integer of the same size is safe even if we do not have a body.
    bool isConvertible = ActTy == ParamTy ||
      (TD && ((ParamTy->isPointerTy() ||
               ParamTy == TD->getIntPtrType(Caller->getContext())) &&
              (ActTy->isPointerTy() ||
               ActTy == TD->getIntPtrType(Caller->getContext()))));
    if (Callee->isDeclaration() && !isConvertible) return false;
  }

  if (FT->getNumParams() < NumActualArgs && !FT->isVarArg() &&
      Callee->isDeclaration())
    return false;   // Do not delete arguments unless we have a function body.

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty())
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them.  Check that these extra arguments have
    // attributes that are compatible with being a vararg call argument.
    for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
      if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
        break;
      Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
      if (PAttrs & Attribute::VarArgsIncompatible)
        return false;
    }

  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary...
  std::vector<Value*> Args;
  Args.reserve(NumActualArgs);
  SmallVector<AttributeWithIndex, 8> attrVec;
  attrVec.reserve(NumCommonArgs);

  // Get any return attributes.
  Attributes RAttrs = CallerPAL.getRetAttributes();

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes.  Wipe out any problematic attributes.
  RAttrs &= ~Attribute::typeIncompatible(NewRetTy);

  // Add the new return attributes.
  if (RAttrs)
    attrVec.push_back(AttributeWithIndex::get(0, RAttrs));

  AI = CS.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    if ((*AI)->getType() == ParamTy) {
      Args.push_back(*AI);
    } else {
      Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
          false, ParamTy, false);
      Args.push_back(Builder->CreateCast(opcode, *AI, ParamTy, "tmp"));
    }

    // Add any parameter attributes.
    if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
      attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
  }

  // If the function takes more arguments than the call was taking, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));

  // If we are removing arguments to the function, emit an obnoxious warning.
  if (FT->getNumParams() < NumActualArgs) {
    if (!FT->isVarArg()) {
      errs() << "WARNING: While resolving call to function '"
             << Callee->getName() << "' arguments were dropped!\n";
    } else {
      // Add all of the arguments in their promoted form to the arg list.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        const Type *PTy = getPromotedType((*AI)->getType());
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through va_arg area!
          Instruction::CastOps opcode =
            CastInst::getCastOpcode(*AI, false, PTy, false);
          Args.push_back(Builder->CreateCast(opcode, *AI, PTy, "tmp"));
        } else {
          Args.push_back(*AI);
        }

        // Add any parameter attributes.
        if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
          attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
      }
    }
  }

  if (Attributes FnAttrs = CallerPAL.getFnAttributes())
    attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));

  if (NewRetTy->isVoidTy())
    Caller->setName("");   // Void type should not have a name.

  const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),
                                                     attrVec.end());

  Instruction *NC;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NC = InvokeInst::Create(Callee, II->getNormalDest(), II->getUnwindDest(),
                            Args.begin(), Args.end(),
                            Caller->getName(), Caller);
    cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
    cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
  } else {
    NC = CallInst::Create(Callee, Args.begin(), Args.end(),
                          Caller->getName(), Caller);
    CallInst *CI = cast<CallInst>(Caller);
    if (CI->isTailCall())
      cast<CallInst>(NC)->setTailCall();
    cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
    cast<CallInst>(NC)->setAttributes(NewCallerPAL);
  }

  // Insert a cast of the return type as necessary.
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      Instruction::CastOps opcode = CastInst::getCastOpcode(NC, false,
                                                            OldRetTy, false);
      NV = NC = CastInst::Create(opcode, NC, OldRetTy, "tmp");

      // If this is an invoke instruction, we should insert it after the first
      // non-phi instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstNonPHI();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call: just insert the cast right after the call
        // instruction.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.AddUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }

  if (!Caller->use_empty())
    Caller->replaceAllUsesWith(NV);

  EraseInstFromFunction(*Caller);
  return true;
}
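
// Illustrative example (hypothetical IR): transformConstExprCastCall moves a
// constant-expression cast off the callee and onto the operands, so
//   %r = call i32 bitcast (i32 (i8*)* @f to i32 (i32*)*)(i32* %p)
// becomes a direct call with the argument cast instead:
//   %c = bitcast i32* %p to i8*
//   %r = call i32 @f(i8* %c)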

// transformCallThroughTrampoline - Turn a call to a function created by the
// init_trampoline intrinsic into a direct call to the underlying function.
//
Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
  Value *Callee = CS.getCalledValue();
  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  const AttrListPtr &Attrs = CS.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return 0;

  IntrinsicInst *Tramp =
    cast<IntrinsicInst>(cast<BitCastInst>(Callee)->getOperand(0));

  Function *NestF = cast<Function>(Tramp->getOperand(2)->stripPointerCasts());
  const PointerType *NestFPTy = cast<PointerType>(NestF->getType());
  const FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());

  const AttrListPtr &NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestIdx = 1;
    const Type *NestTy = 0;
    Attributes NestAttr = Attribute::None;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
           E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
      if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = NestAttrs.getParamAttributes(NestIdx);
        break;
      }

    if (NestTy) {
      Instruction *Caller = CS.getInstruction();
      std::vector<Value*> NewArgs;
      NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);

      SmallVector<AttributeWithIndex, 8> NewAttrs;
      NewAttrs.reserve(Attrs.getNumSlots() + 1);

      // Insert the nest argument into the call argument list, which may
      // mean appending it.  Likewise for attributes.

      // Add any result attributes.
      if (Attributes Attr = Attrs.getRetAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(0, Attr));

      {
        unsigned Idx = 1;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        do {
          if (Idx == NestIdx) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp->getOperand(3);
            if (NestVal->getType() != NestTy)
              NestVal = new BitCastInst(NestVal, NestTy, "nest", Caller);
            NewArgs.push_back(NestVal);
            NewAttrs.push_back(AttributeWithIndex::get(NestIdx, NestAttr));
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          if (Attributes Attr = Attrs.getParamAttributes(Idx))
            NewAttrs.push_back
              (AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr));

          ++Idx, ++I;
        } while (1);
      }

      // Add any function attributes.
      if (Attributes Attr = Attrs.getFnAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(~0, Attr));

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<const Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned Idx = 1;
        FunctionType::param_iterator I = FTy->param_begin(),
          E = FTy->param_end();

        do {
          if (Idx == NestIdx)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++Idx, ++I;
        } while (1);
      }

      // Replace the trampoline call with a direct call.  Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
        NestF->getType() == PointerType::getUnqual(NewFTy) ?
        NestF : ConstantExpr::getBitCast(NestF,
                                         PointerType::getUnqual(NewFTy));
      const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),
                                                   NewAttrs.end());

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        NewCaller = InvokeInst::Create(NewCallee,
                                       II->getNormalDest(), II->getUnwindDest(),
                                       NewArgs.begin(), NewArgs.end(),
                                       Caller->getName(), Caller);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewCallee, NewArgs.begin(), NewArgs.end(),
                                     Caller->getName(), Caller);
        if (cast<CallInst>(Caller)->isTailCall())
          cast<CallInst>(NewCaller)->setTailCall();
        cast<CallInst>(NewCaller)->
          setCallingConv(cast<CallInst>(Caller)->getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }
      if (!Caller->getType()->isVoidTy())
        Caller->replaceAllUsesWith(NewCaller);
      Caller->eraseFromParent();
      Worklist.Remove(Caller);
      return 0;
    }
  }

  // Replace the trampoline call with a direct call.  Since there is no 'nest'
  // parameter, there is no need to adjust the argument list.  Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee =
    NestF->getType() == PTy ? NestF :
                              ConstantExpr::getBitCast(NestF, PTy);
  CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}
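
// Illustrative example (hypothetical IR): given a trampoline set up as
//   %vfn = call i8* @llvm.init.trampoline(i8* %tramp,
//              i8* bitcast (i32 (i8*, i32)* @f to i8*), i8* %nval)
//   %fn = bitcast i8* %vfn to i32 (i32)*
//   %r = call i32 %fn(i32 %x)
// transformCallThroughTrampoline rewrites the call into a direct call to @f,
// splicing %nval into the position of the parameter marked 'nest':
//   %r = call i32 @f(i8* nest %nval, i32 %x)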