InstCombineCalls.cpp revision fc8ccfedbba651e324d827de6693aad1491314c7
//===- InstCombineCalls.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall and visitInvoke functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Analysis/MemoryBuiltins.h"
using namespace llvm;

/// getPromotedType - Return the specified type promoted as it would be to pass
/// through a va_arg area.
static const Type *getPromotedType(const Type *Ty) {
  if (const IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}

/// EnforceKnownAlignment - If the specified pointer points to an object that
/// we control, modify the object's alignment to PrefAlign. This isn't
/// often possible though. If alignment is important, a more reliable approach
/// is to simply align all global variables and allocation instructions to
/// their preferred alignment from the beginning.
///
static unsigned EnforceKnownAlignment(Value *V,
                                      unsigned Align, unsigned PrefAlign) {

  User *U = dyn_cast<User>(V);
  if (!U) return Align;

  switch (Operator::getOpcode(U)) {
  default: break;
  case Instruction::BitCast:
    return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
  case Instruction::GetElementPtr: {
    // If all indexes are zero, it is just the alignment of the base pointer.
    bool AllZeroOperands = true;
    for (User::op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e; ++i)
      if (!isa<Constant>(*i) ||
          !cast<Constant>(*i)->isNullValue()) {
        AllZeroOperands = false;
        break;
      }

    if (AllZeroOperands) {
      // Treat this like a bitcast.
      return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
    }
    break;
  }
  }

  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // If there is a large requested alignment and we can, bump up the
    // alignment of the global.
    if (!GV->isDeclaration()) {
      if (GV->getAlignment() >= PrefAlign)
        Align = GV->getAlignment();
      else {
        GV->setAlignment(PrefAlign);
        Align = PrefAlign;
      }
    }
  } else if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    // If there is a requested alignment and if this is an alloca, round up.
    if (AI->getAlignment() >= PrefAlign)
      Align = AI->getAlignment();
    else {
      AI->setAlignment(PrefAlign);
      Align = PrefAlign;
    }
  }

  return Align;
}

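// A worked example of the computation below, for illustration: if
// ComputeMaskedBits proves that the low four bits of a pointer are zero
// (TrailZ == 4), then the pointer is at least 16-byte aligned (1u << 4).
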
/// GetOrEnforceKnownAlignment - If the specified pointer has an alignment that
/// we can determine, return it, otherwise return 0. If PrefAlign is specified,
/// and it is more than the alignment of the ultimate object, see if we can
/// increase the alignment of the ultimate object, making this check succeed.
unsigned InstCombiner::GetOrEnforceKnownAlignment(Value *V,
                                                  unsigned PrefAlign) {
  unsigned BitWidth = TD ? TD->getTypeSizeInBits(V->getType()) :
                           sizeof(PrefAlign) * CHAR_BIT;
  APInt Mask = APInt::getAllOnesValue(BitWidth);
  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  ComputeMaskedBits(V, Mask, KnownZero, KnownOne);
  unsigned TrailZ = KnownZero.countTrailingOnes();
  unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);

  if (PrefAlign > Align)
    Align = EnforceKnownAlignment(V, Align, PrefAlign);

  // We don't need to make any adjustment.
  return Align;
}

Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
  unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getOperand(1));
  unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getOperand(2));
  unsigned MinAlign = std::min(DstAlign, SrcAlign);
  unsigned CopyAlign = MI->getAlignment();

  if (CopyAlign < MinAlign) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      MinAlign, false));
    return MI;
  }

  // If the MemCpyInst's length is 1/2/4/8 bytes, replace the memcpy with a
  // load/store.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getOperand(3));
  if (MemOpLength == 0) return 0;

  // Source and destination pointer types are always "i8*" for the intrinsic.
  // See if the size is something we can handle with a single primitive
  // load/store. A single load+store correctly handles overlapping memory in
  // the memmove case.
  unsigned Size = MemOpLength->getZExtValue();
  if (Size == 0) return MI;  // Delete this mem transfer.

  if (Size > 8 || (Size&(Size-1)))
    return 0;  // If not 1/2/4/8 bytes, exit.

  // Use an integer load+store unless we can find something better.
  Type *NewPtrTy =
    PointerType::getUnqual(IntegerType::get(MI->getContext(), Size<<3));

  // Memcpy forces the use of i8* for the source and destination. That means
  // that if you're using memcpy to move one double around, you'll get a cast
  // from double* to i8*. We'd much rather use a double load+store rather than
  // an i64 load+store, here because this improves the odds that the source or
  // dest address will be promotable. See if we can find a better type than
  // the integer datatype.
  Value *StrippedDest = MI->getOperand(1)->stripPointerCasts();
  if (StrippedDest != MI->getOperand(1)) {
    const Type *SrcETy = cast<PointerType>(StrippedDest->getType())
                           ->getElementType();
    if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
      // The SrcETy might be something like {{{double}}} or [1 x double]. Rip
      // down through these levels if so.
      while (!SrcETy->isSingleValueType()) {
        if (const StructType *STy = dyn_cast<StructType>(SrcETy)) {
          if (STy->getNumElements() == 1)
            SrcETy = STy->getElementType(0);
          else
            break;
        } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
          if (ATy->getNumElements() == 1)
            SrcETy = ATy->getElementType();
          else
            break;
        } else
          break;
      }

      if (SrcETy->isSingleValueType())
        NewPtrTy = PointerType::getUnqual(SrcETy);
    }
  }

  // If the memcpy/memmove provides better alignment info than we can
  // infer, use it.
  SrcAlign = std::max(SrcAlign, CopyAlign);
  DstAlign = std::max(DstAlign, CopyAlign);

  Value *Src = Builder->CreateBitCast(MI->getOperand(2), NewPtrTy);
  Value *Dest = Builder->CreateBitCast(MI->getOperand(1), NewPtrTy);
  Instruction *L = new LoadInst(Src, "tmp", false, SrcAlign);
  InsertNewInstBefore(L, *MI);
  InsertNewInstBefore(new StoreInst(L, Dest, false, DstAlign), *MI);

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setOperand(3, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}

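// As an illustration of the transformation above, a 4-byte memcpy such as
//   call void @llvm.memcpy.i32(i8* %d, i8* %s, i32 4, i32 4)
// is rewritten (with hypothetical value names) into a single load/store pair,
//   %v = load i32* %s.cast, align 4        ; %s.cast = bitcast of %s to i32*
//   store i32 %v, i32* %d.cast, align 4
// and the length is then zeroed so the dead memcpy is erased on the next
// iteration.
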
Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
  unsigned Alignment = GetOrEnforceKnownAlignment(MI->getDest());
  if (MI->getAlignment() < Alignment) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      Alignment, false));
    return MI;
  }

  // Extract the length and alignment and fill if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return 0;
  uint64_t Len = LenC->getZExtValue();
  Alignment = MI->getAlignment();

  // If the length is zero, this is a no-op.
  if (Len == 0) return MI;  // memset(d,c,0,a) -> noop.

  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    const Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    Dest = Builder->CreateBitCast(Dest, PointerType::getUnqual(ITy));

    // For memset, alignment 0 means the same as alignment 1, but stores have
    // no such convention, so normalize it here.
    if (Alignment == 0) Alignment = 1;

    // Extract the fill value and store.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    InsertNewInstBefore(new StoreInst(ConstantInt::get(ITy, Fill),
                                      Dest, false, Alignment), *MI);

    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return 0;
}

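// For example (illustrative operands): an 8-byte memset with fill byte 0xA,
//   call void @llvm.memset.i64(i8* %p, i8 10, i64 8, i32 1)
// becomes a single store of the fill byte splatted across one i64:
//   store i64 723401728380766730, i64* %p.cast, align 1  ; 0x0A0A0A0A0A0A0A0A
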
/// visitCallInst - CallInst simplification. This mostly only handles folding
/// of intrinsic instructions. For normal calls, it allows visitCallSite to do
/// the heavy lifting.
///
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
  if (isFreeCall(&CI))
    return visitFree(CI);

  // If the caller function is nounwind, mark the call as nounwind, even if the
  // callee isn't.
  if (CI.getParent()->getParent()->doesNotThrow() &&
      !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallSite(&CI);

  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue()) return EraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations. We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // If we have a memmove and the source operand is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getParent()->getParent()->getParent();
          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
          const Type *Tys[1];
          Tys[0] = CI.getOperand(3)->getType();
          CI.setOperand(0,
                        Intrinsic::getDeclaration(M, MemCpyID, Tys, 1));
          Changed = true;
        }
    }

    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return EraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (isa<MemTransferInst>(MI)) {
      if (Instruction *I = SimplifyMemTransfer(MI))
        return I;
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
      if (Instruction *I = SimplifyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }

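  // A small illustration of the llvm.objectsize folding handled by the first
  // case below (hypothetical IR):
  //   @g = global [10 x i8] zeroinitializer
  //   %n = call i32 @llvm.objectsize.i32(
  //          i8* getelementptr ([10 x i8]* @g, i32 0, i32 3), i1 false)
  // folds to i32 7, the bytes remaining from offset 3 to the end of @g.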
  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::objectsize: {
    // We need target data for just about everything so depend on it.
    if (!TD) break;

    const Type *ReturnTy = CI.getType();
    bool Min = (cast<ConstantInt>(II->getOperand(2))->getZExtValue() == 1);

    // Get to the real allocated thing and offset as fast as possible.
    Value *Op1 = II->getOperand(1)->stripPointerCasts();

    // If we've stripped down to a single global variable that we
    // can know the size of then just return that.
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1)) {
      if (GV->hasDefinitiveInitializer()) {
        Constant *C = GV->getInitializer();
        uint64_t GlobalSize = TD->getTypeAllocSize(C->getType());
        return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, GlobalSize));
      } else {
        // Can't determine size of the GV.
        Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
        return ReplaceInstUsesWith(CI, RetVal);
      }
    } else if (AllocaInst *AI = dyn_cast<AllocaInst>(Op1)) {
      // Get alloca size.
      if (AI->getAllocatedType()->isSized()) {
        uint64_t AllocaSize = TD->getTypeAllocSize(AI->getAllocatedType());
        if (AI->isArrayAllocation()) {
          const ConstantInt *C = dyn_cast<ConstantInt>(AI->getArraySize());
          if (!C) break;
          AllocaSize *= C->getZExtValue();
        }
        return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, AllocaSize));
      }
    } else if (CallInst *MI = extractMallocCall(Op1)) {
      const Type* MallocType = getMallocAllocatedType(MI);
      // Get the malloc'd size.
      if (MallocType->isSized()) {
        if (Value *NElems = getMallocArraySize(MI, TD, true)) {
          if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
            return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy,
              (NElements->getZExtValue() * TD->getTypeAllocSize(MallocType))));
        }
      }
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op1)) {
      // Only handle constant GEPs here.
      if (CE->getOpcode() != Instruction::GetElementPtr) break;
      GEPOperator *GEP = cast<GEPOperator>(CE);

      // Make sure we're not a constant offset from an external
      // global.
      Value *Operand = GEP->getPointerOperand();
      Operand = Operand->stripPointerCasts();
      if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Operand))
        if (!GV->hasDefinitiveInitializer()) break;

      // Get what we're pointing to and its size.
      const PointerType *BaseType =
        cast<PointerType>(Operand->getType());
      uint64_t Size = TD->getTypeAllocSize(BaseType->getElementType());

      // Get the current byte offset into the thing. Use the original
      // operand in case we're looking through a bitcast.
      SmallVector<Value*, 8> Ops(CE->op_begin()+1, CE->op_end());
      const PointerType *OffsetType =
        cast<PointerType>(GEP->getPointerOperand()->getType());
      uint64_t Offset = TD->getIndexedOffset(OffsetType, &Ops[0], Ops.size());

      if (Size < Offset) {
        // Out-of-bounds reference? Negative index normalized to a large
        // index? Just return "I don't know".
        Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
        return ReplaceInstUsesWith(CI, RetVal);
      }

      Constant *RetVal = ConstantInt::get(ReturnTy, Size-Offset);
      return ReplaceInstUsesWith(CI, RetVal);
    }

    // Do not return "I don't know" here. Later optimization passes could
    // make it possible to evaluate objectsize to a constant.
    break;
  }
  case Intrinsic::bswap:
    // bswap(bswap(x)) -> x
    if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getOperand(1)))
      if (Operand->getIntrinsicID() == Intrinsic::bswap)
        return ReplaceInstUsesWith(CI, Operand->getOperand(1));

    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
    if (TruncInst *TI = dyn_cast<TruncInst>(II->getOperand(1))) {
      if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(TI->getOperand(0)))
        if (Operand->getIntrinsicID() == Intrinsic::bswap) {
          unsigned C = Operand->getType()->getPrimitiveSizeInBits() -
                       TI->getType()->getPrimitiveSizeInBits();
          Value *CV = ConstantInt::get(Operand->getType(), C);
          Value *V = Builder->CreateLShr(Operand->getOperand(1), CV);
          return new TruncInst(V, TI->getType());
        }
    }

    break;
  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getOperand(2))) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
      // powi(x, 1) -> x
      if (Power->isOne())
        return ReplaceInstUsesWith(CI, II->getOperand(1));
      // powi(x, -1) -> 1/x
      if (Power->isAllOnesValue())
        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
                                          II->getOperand(1));
    }
    break;
  case Intrinsic::cttz: {
    // If all bits below the first known one are known zero,
    // this value is constant.
    const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getOperand(1), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned TrailingZeros = KnownOne.countTrailingZeros();
    APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                     APInt(BitWidth, TrailingZeros)));
    break;
  }
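  // Example of the fold above (illustrative): if the operand of cttz is known
  // to have the form (x << 5) | 16, bits 0-3 are known zero and bit 4 is
  // known one, so the intrinsic folds to the constant 4.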
  case Intrinsic::ctlz: {
    // If all bits above the first known one are known zero,
    // this value is constant.
    const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getOperand(1), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned LeadingZeros = KnownOne.countLeadingZeros();
    APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                     APInt(BitWidth, LeadingZeros)));
    break;
  }
  case Intrinsic::uadd_with_overflow: {
    Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
    const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt Mask = APInt::getSignBit(BitWidth);
    APInt LHSKnownZero(BitWidth, 0);
    APInt LHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
    bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
    bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];

    if (LHSKnownNegative || LHSKnownPositive) {
      APInt RHSKnownZero(BitWidth, 0);
      APInt RHSKnownOne(BitWidth, 0);
      ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
      bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
      bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
      if (LHSKnownNegative && RHSKnownNegative) {
        // The sign bit is set in both cases: this MUST overflow.
        // Create a simple add instruction, and insert it into the struct.
        Instruction *Add = BinaryOperator::CreateAdd(LHS, RHS, "", &CI);
        Worklist.Add(Add);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getTrue(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, Add, 0);
      }

      if (LHSKnownPositive && RHSKnownPositive) {
        // The sign bit is clear in both cases: this CANNOT overflow.
        // Create a simple add instruction, and insert it into the struct.
        Instruction *Add = BinaryOperator::CreateNUWAdd(LHS, RHS, "", &CI);
        Worklist.Add(Add);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, Add, 0);
      }
    }
  }
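  // For instance (illustrative): if both operands of uadd.with.overflow are
  // known to have their sign bit set, say each was OR'd with 0x80000000 on
  // i32, the unsigned add must wrap, so the overflow bit folds to true.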
  // FALL THROUGH uadd into sadd
  case Intrinsic::sadd_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getOperand(1)) &&
        !isa<Constant>(II->getOperand(2))) {
      Value *LHS = II->getOperand(1);
      II->setOperand(1, II->getOperand(2));
      II->setOperand(2, LHS);
      return II;
    }

    // X + undef -> undef
    if (isa<UndefValue>(II->getOperand(2)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getOperand(2))) {
      // X + 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getOperand(1)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getOperand(1), 0);
      }
    }
    break;
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // undef - X -> undef
    // X - undef -> undef
    if (isa<UndefValue>(II->getOperand(1)) ||
        isa<UndefValue>(II->getOperand(2)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getOperand(2))) {
      // X - 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getOperand(1)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getOperand(1), 0);
      }
    }
    break;
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getOperand(1)) &&
        !isa<Constant>(II->getOperand(2))) {
      Value *LHS = II->getOperand(1);
      II->setOperand(1, II->getOperand(2));
      II->setOperand(2, LHS);
      return II;
    }

    // X * undef -> undef
    if (isa<UndefValue>(II->getOperand(2)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getOperand(2))) {
      // X * 0 -> {0, false}
      if (RHSI->isZero())
        return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));

      // X * 1 -> {X, false}
      if (RHSI->equalsInt(1)) {
        Constant *V[] = {
          UndefValue::get(II->getOperand(1)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getOperand(1), 0);
      }
    }
    break;
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::x86_sse_loadu_ps:
  case Intrinsic::x86_sse2_loadu_pd:
  case Intrinsic::x86_sse2_loadu_dq:
    // Turn PPC lvx -> load if the pointer is known aligned.
    // Turn X86 loadups -> load if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
      Value *Ptr = Builder->CreateBitCast(II->getOperand(1),
                                      PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
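  // For example (illustrative): when %p can be shown, or promoted, to be
  // 16-byte aligned, an unaligned-load intrinsic such as
  //   %v = call <4 x float> @llvm.x86.sse.loadu.ps(i8* %p)
  // becomes an ordinary vector load:
  //   %cast = bitcast i8* %p to <4 x float>*
  //   %v = load <4 x float>* %cast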
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getOperand(2), 16) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getOperand(1)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getOperand(2), OpPtrTy);
      return new StoreInst(II->getOperand(1), Ptr);
    }
    break;
  case Intrinsic::x86_sse_storeu_ps:
  case Intrinsic::x86_sse2_storeu_pd:
  case Intrinsic::x86_sse2_storeu_dq:
    // Turn X86 storeu -> store if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getOperand(2)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getOperand(1), OpPtrTy);
      return new StoreInst(II->getOperand(2), Ptr);
    }
    break;

  case Intrinsic::x86_sse_cvttss2si: {
    // This intrinsic only demands the 0th element of its input vector. If
    // we can simplify the input based on that, do so now.
    unsigned VWidth =
      cast<VectorType>(II->getOperand(1)->getType())->getNumElements();
    APInt DemandedElts(VWidth, 1);
    APInt UndefElts(VWidth, 0);
    if (Value *V = SimplifyDemandedVectorElts(II->getOperand(1), DemandedElts,
                                              UndefElts)) {
      II->setOperand(1, V);
      return II;
    }
    break;
  }

  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getOperand(3))) {
      assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        if (!isa<ConstantInt>(Mask->getOperand(i)) &&
            !isa<UndefValue>(Mask->getOperand(i))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 = Builder->CreateBitCast(II->getOperand(1),
                                            Mask->getType());
        Value *Op1 = Builder->CreateBitCast(II->getOperand(2),
                                            Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getOperand(i)))
            continue;
          unsigned Idx = cast<ConstantInt>(Mask->getOperand(i))->getZExtValue();
          Idx &= 31;  // Match the hardware behavior.

          if (ExtractedElts[Idx] == 0) {
            ExtractedElts[Idx] =
              Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
                  ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                   Idx&15, false), "tmp");
          }

          // Insert this value into the result vector.
          Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
                       ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                        i, false), "tmp");
        }
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
      }
    }
    break;
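  // Concretely (hypothetical mask): a vperm mask element of 3 selects byte 3
  // of the first input vector, while a mask element of 19 (16 + 3) selects
  // byte 3 of the second; the loop above emits exactly those
  // extractelement/insertelement pairs.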

  case Intrinsic::stackrestore: {
    // If the save is right next to the restore, remove the restore. This can
    // happen when variable allocas are DCE'd.
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getOperand(1))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        BasicBlock::iterator BI = SS;
        if (&*++BI == II)
          return EraseInstFromFunction(CI);
      }
    }

    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
    BasicBlock::iterator BI = II;
    TerminatorInst *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      if (isa<AllocaInst>(BI) || isMalloc(BI)) {
        CannotRemove = true;
        break;
      }
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
          // If there is a stackrestore below this one, remove this one.
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return EraseInstFromFunction(CI);
          // Otherwise, ignore the intrinsic.
        } else {
          // If we found a non-intrinsic call, we can't remove the stack
          // restore.
          CannotRemove = true;
          break;
        }
      }
    }

    // If the stack restore is in a return/unwind block and if there are no
    // allocas or calls between the restore and the return, nuke the restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<UnwindInst>(TI)))
      return EraseInstFromFunction(CI);
    break;
  }
  }

  return visitCallSite(II);
}

// InvokeInst simplification
//
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}

/// isSafeToEliminateVarargsCast - If this cast does not affect the value
/// passed through the varargs area, we can eliminate the use of the cast.
static bool isSafeToEliminateVarargsCast(const CallSite CS,
                                         const CastInst * const CI,
                                         const TargetData * const TD,
                                         const int ix) {
  if (!CI->isLosslessCast())
    return false;

  // The size of ByVal arguments is derived from the type, so we
  // can't change to a type with a different size. If the size were
  // passed explicitly we could avoid this check.
  if (!CS.paramHasAttr(ix, Attribute::ByVal))
    return true;

  const Type* SrcTy =
    cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  const Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
    return false;
  return true;
}

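// For instance (illustrative): given
//   %t = bitcast i32* %p to i8*
//   call void (...)* @f(i8* %t)
// the bitcast is lossless and the argument is not byval, so the check above
// succeeds and visitCallSite below can pass i32* %p directly, leaving the
// cast dead.
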
// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
  bool Changed = false;

  // If the callee is a constexpr cast of a function, attempt to move the cast
  // to the arguments of the call/invoke.
  if (transformConstExprCastCall(CS)) return 0;

  Value *Callee = CS.getCalledValue();

  if (Function *CalleeF = dyn_cast<Function>(Callee))
    // If the call and callee calling conventions don't match, this call must
    // be unreachable, as the call is undefined.
    if (CalleeF->getCallingConv() != CS.getCallingConv() &&
        // Only do this for calls to a function with a body. A prototype may
        // not actually end up matching the implementation's calling conv for a
        // variety of reasons (e.g. it may be written in assembly).
        !CalleeF->isDeclaration()) {
      Instruction *OldCall = CS.getInstruction();
      new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                    OldCall);
      // If OldCall does not return void then replaceAllUsesWith undef.
      // This allows ValueHandlers and custom metadata to adjust themselves.
      if (!OldCall->getType()->isVoidTy())
        OldCall->replaceAllUsesWith(UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))
        return EraseInstFromFunction(*OldCall);

      // We cannot remove an invoke, because it would change the CFG, just
      // change the callee to a null pointer.
      cast<InvokeInst>(OldCall)->setOperand(0,
                                  Constant::getNullValue(CalleeF->getType()));
      return 0;
    }

  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
    // This instruction is not reachable, just remove it. We insert a store to
    // undef so that we know that this code is not reachable, despite the fact
    // that we can't modify the CFG here.
    new StoreInst(ConstantInt::getTrue(Callee->getContext()),
               UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                  CS.getInstruction());

    // If CS does not return void then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust themselves.
    if (!CS.getInstruction()->getType()->isVoidTy())
      CS.getInstruction()->
        replaceAllUsesWith(UndefValue::get(CS.getInstruction()->getType()));

    if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      // Don't break the CFG, insert a dummy cond branch.
      BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
                         ConstantInt::getTrue(Callee->getContext()), II);
    }
    return EraseInstFromFunction(*CS.getInstruction());
  }

  if (BitCastInst *BC = dyn_cast<BitCastInst>(Callee))
    if (IntrinsicInst *In = dyn_cast<IntrinsicInst>(BC->getOperand(0)))
      if (In->getIntrinsicID() == Intrinsic::init_trampoline)
        return transformCallThroughTrampoline(CS);

  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams() + (isa<InvokeInst>(Callee) ? 3 : 1);
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
    for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
           E = CS.arg_end(); I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }

  return Changed ? CS.getInstruction() : 0;
}

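// For example (illustrative): a call through a constant cast of @f,
//   %r = call i32 bitcast (i32 (i8*)* @f to i32 (i32*)*)(i32* %p)
// can be rewritten as a direct call by casting the argument instead:
//   %c = bitcast i32* %p to i8*
//   %r = call i32 @f(i8* %c)
// which is what transformConstExprCastCall below does when it is legal.
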
// transformConstExprCastCall - If the callee is a constexpr cast of a function,
// attempt to move the cast to the arguments of the call/invoke.
//
bool InstCombiner::transformConstExprCastCall(CallSite CS) {
  if (!isa<ConstantExpr>(CS.getCalledValue())) return false;
  ConstantExpr *CE = cast<ConstantExpr>(CS.getCalledValue());
  if (CE->getOpcode() != Instruction::BitCast ||
      !isa<Function>(CE->getOperand(0)))
    return false;
  Function *Callee = cast<Function>(CE->getOperand(0));
  Instruction *Caller = CS.getInstruction();
  const AttrListPtr &CallerPAL = CS.getAttributes();

  // Okay, this is a cast from a function to a different type. Unless doing so
  // would cause a type conversion of one of our arguments, change this call to
  // be a direct call with arguments cast to the appropriate types.
  //
  const FunctionType *FT = Callee->getFunctionType();
  const Type *OldRetTy = Caller->getType();
  const Type *NewRetTy = FT->getReturnType();

  if (NewRetTy->isStructTy())
    return false; // TODO: Handle multiple return values.

  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {
    if (Callee->isDeclaration() &&
        // Conversion is ok if changing from one pointer type to another or
        // from a pointer to an integer of the same size.
        !((OldRetTy->isPointerTy() || !TD ||
           OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
          (NewRetTy->isPointerTy() || !TD ||
           NewRetTy == TD->getIntPtrType(Caller->getContext()))))
      return false;   // Cannot transform this return value.

    if (!Caller->use_empty() &&
        // void -> non-void is handled specially
        !NewRetTy->isVoidTy() && !CastInst::isCastable(NewRetTy, OldRetTy))
      return false;   // Cannot transform this return value.

    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
      Attributes RAttrs = CallerPAL.getRetAttributes();
      if (RAttrs & Attribute::typeIncompatible(NewRetTy))
        return false;   // Attribute not compatible with transformed value.
    }

    // If the callsite is an invoke instruction, and the return value is used
    // by a PHI node in a successor, we cannot change the return type of the
    // call because there is no place to put the cast instruction (without
    // breaking the critical edge). Bail out in this case.
    if (!Caller->use_empty())
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
        for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
             UI != E; ++UI)
          if (PHINode *PN = dyn_cast<PHINode>(*UI))
            if (PN->getParent() == II->getNormalDest() ||
                PN->getParent() == II->getUnwindDest())
              return false;
  }

  unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  CallSite::arg_iterator AI = CS.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    const Type *ActTy = (*AI)->getType();

    if (!CastInst::isCastable(ActTy, ParamTy))
      return false;   // Cannot transform this parameter value.

    if (CallerPAL.getParamAttributes(i + 1)
        & Attribute::typeIncompatible(ParamTy))
      return false;   // Attribute not compatible with transformed value.

    // Converting from one pointer type to another or between a pointer and an
    // integer of the same size is safe even if we do not have a body.
    bool isConvertible = ActTy == ParamTy ||
      (TD && ((ParamTy->isPointerTy() ||
               ParamTy == TD->getIntPtrType(Caller->getContext())) &&
              (ActTy->isPointerTy() ||
               ActTy == TD->getIntPtrType(Caller->getContext()))));
    if (Callee->isDeclaration() && !isConvertible) return false;
  }

  if (FT->getNumParams() < NumActualArgs && !FT->isVarArg() &&
      Callee->isDeclaration())
    return false;   // Do not delete arguments unless we have a function body.

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty())
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them. Check that these extra arguments have
    // attributes that are compatible with being a vararg call argument.
    for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
      if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
        break;
      Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
      if (PAttrs & Attribute::VarArgsIncompatible)
        return false;
    }

  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary...
  std::vector<Value*> Args;
  Args.reserve(NumActualArgs);
  SmallVector<AttributeWithIndex, 8> attrVec;
  attrVec.reserve(NumCommonArgs);

  // Get any return attributes.
  Attributes RAttrs = CallerPAL.getRetAttributes();

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes. Wipe out any problematic attributes.
  RAttrs &= ~Attribute::typeIncompatible(NewRetTy);

  // Add the new return attributes.
  if (RAttrs)
    attrVec.push_back(AttributeWithIndex::get(0, RAttrs));

  AI = CS.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    if ((*AI)->getType() == ParamTy) {
      Args.push_back(*AI);
    } else {
      Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
          false, ParamTy, false);
      Args.push_back(Builder->CreateCast(opcode, *AI, ParamTy, "tmp"));
    }

    // Add any parameter attributes.
    if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
      attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
  }

  // If the function takes more arguments than the call was taking, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));

  // If we are removing arguments to the function, emit an obnoxious warning.
  if (FT->getNumParams() < NumActualArgs) {
    if (!FT->isVarArg()) {
      errs() << "WARNING: While resolving call to function '"
             << Callee->getName() << "' arguments were dropped!\n";
    } else {
      // Add all of the arguments in their promoted form to the arg list.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        const Type *PTy = getPromotedType((*AI)->getType());
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through va_arg area!
          Instruction::CastOps opcode =
            CastInst::getCastOpcode(*AI, false, PTy, false);
          Args.push_back(Builder->CreateCast(opcode, *AI, PTy, "tmp"));
        } else {
          Args.push_back(*AI);
        }

        // Add any parameter attributes.
        if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
          attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
      }
    }
  }

  if (Attributes FnAttrs = CallerPAL.getFnAttributes())
    attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));

  if (NewRetTy->isVoidTy())
    Caller->setName("");   // Void type should not have a name.

  const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),
                                                     attrVec.end());

  Instruction *NC;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NC = InvokeInst::Create(Callee, II->getNormalDest(), II->getUnwindDest(),
                            Args.begin(), Args.end(),
                            Caller->getName(), Caller);
    cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
    cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
  } else {
    NC = CallInst::Create(Callee, Args.begin(), Args.end(),
                          Caller->getName(), Caller);
    CallInst *CI = cast<CallInst>(Caller);
    if (CI->isTailCall())
      cast<CallInst>(NC)->setTailCall();
    cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
    cast<CallInst>(NC)->setAttributes(NewCallerPAL);
  }

  // Insert a cast of the return type as necessary.
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      Instruction::CastOps opcode = CastInst::getCastOpcode(NC, false,
                                                            OldRetTy, false);
      NV = NC = CastInst::Create(opcode, NC, OldRetTy, "tmp");

      // If this is an invoke instruction, we should insert it after the first
      // non-phi instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstNonPHI();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call, just insert cast right after the call instr.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.AddUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }

  if (!Caller->use_empty())
    Caller->replaceAllUsesWith(NV);

  EraseInstFromFunction(*Caller);
  return true;
}

// transformCallThroughTrampoline - Turn a call to a function created by the
// init_trampoline intrinsic into a direct call to the underlying function.
//
Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
  Value *Callee = CS.getCalledValue();
  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  const AttrListPtr &Attrs = CS.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return 0;

  IntrinsicInst *Tramp =
    cast<IntrinsicInst>(cast<BitCastInst>(Callee)->getOperand(0));

  Function *NestF = cast<Function>(Tramp->getOperand(2)->stripPointerCasts());
  const PointerType *NestFPTy = cast<PointerType>(NestF->getType());
  const FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());

  const AttrListPtr &NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestIdx = 1;
    const Type *NestTy = 0;
    Attributes NestAttr = Attribute::None;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
         E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
      if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = NestAttrs.getParamAttributes(NestIdx);
        break;
      }

    if (NestTy) {
      Instruction *Caller = CS.getInstruction();
      std::vector<Value*> NewArgs;
      NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);

      SmallVector<AttributeWithIndex, 8> NewAttrs;
      NewAttrs.reserve(Attrs.getNumSlots() + 1);

      // Insert the nest argument into the call argument list, which may
      // mean appending it. Likewise for attributes.

      // Add any result attributes.
      if (Attributes Attr = Attrs.getRetAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(0, Attr));

      {
        unsigned Idx = 1;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        do {
          if (Idx == NestIdx) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp->getOperand(3);
            if (NestVal->getType() != NestTy)
              NestVal = new BitCastInst(NestVal, NestTy, "nest", Caller);
            NewArgs.push_back(NestVal);
            NewAttrs.push_back(AttributeWithIndex::get(NestIdx, NestAttr));
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          if (Attributes Attr = Attrs.getParamAttributes(Idx))
            NewAttrs.push_back
              (AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr));

          ++Idx, ++I;
        } while (1);
      }

      // Add any function attributes.
      if (Attributes Attr = Attrs.getFnAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(~0, Attr));

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<const Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned Idx = 1;
        FunctionType::param_iterator I = FTy->param_begin(),
          E = FTy->param_end();

        do {
          if (Idx == NestIdx)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++Idx, ++I;
        } while (1);
      }

      // Replace the trampoline call with a direct call. Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
        NestF->getType() == PointerType::getUnqual(NewFTy) ?
        NestF : ConstantExpr::getBitCast(NestF,
                                         PointerType::getUnqual(NewFTy));
      const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),
                                                   NewAttrs.end());

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        NewCaller = InvokeInst::Create(NewCallee,
                                       II->getNormalDest(),
                                       II->getUnwindDest(),
                                       NewArgs.begin(), NewArgs.end(),
                                       Caller->getName(), Caller);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewCallee, NewArgs.begin(),
                                     NewArgs.end(),
                                     Caller->getName(), Caller);
        if (cast<CallInst>(Caller)->isTailCall())
          cast<CallInst>(NewCaller)->setTailCall();
        cast<CallInst>(NewCaller)->
          setCallingConv(cast<CallInst>(Caller)->getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }
      if (!Caller->getType()->isVoidTy())
        Caller->replaceAllUsesWith(NewCaller);
      Caller->eraseFromParent();
      Worklist.Remove(Caller);
      return 0;
    }
  }

  // Replace the trampoline call with a direct call. Since there is no 'nest'
  // parameter, there is no need to adjust the argument list. Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee =
    NestF->getType() == PTy ? NestF :
                              ConstantExpr::getBitCast(NestF, PTy);
  CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}