InstCombineCalls.cpp revision e3305b17502c2a34152d4f50607b685eb2cadd21
//===- InstCombineCalls.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall and visitInvoke functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

/// getPromotedType - Return the specified type promoted as it would be to pass
/// through a va_arg area.
static Type *getPromotedType(Type *Ty) {
  if (IntegerType *ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}


Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
  unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), TD);
  unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), TD);
  unsigned MinAlign = std::min(DstAlign, SrcAlign);
  unsigned CopyAlign = MI->getAlignment();

  if (CopyAlign < MinAlign) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      MinAlign, false));
    return MI;
  }

  // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
  // load/store.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getArgOperand(2));
  if (MemOpLength == 0) return 0;

  // Source and destination pointer types are always "i8*" for intrinsic.  See
  // if the size is something we can handle with a single primitive load/store.
  // A single load+store correctly handles overlapping memory in the memmove
  // case.
  unsigned Size = MemOpLength->getZExtValue();
  if (Size == 0) return MI;  // Delete this mem transfer.

  if (Size > 8 || (Size&(Size-1)))
    return 0;  // If not 1/2/4/8 bytes, exit.

  // Use an integer load+store unless we can find something better.
  unsigned SrcAddrSp =
    cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
    cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();

  IntegerType *IntType = IntegerType::get(MI->getContext(), Size<<3);
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);

  // Memcpy forces the use of i8* for the source and destination.  That means
  // that if you're using memcpy to move one double around, you'll get a cast
  // from double* to i8*.  We'd much rather use a double load+store here,
  // because this improves the odds that the source or dest address will be
  // promotable.  See if we can find a better type than the integer datatype.
  Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
  if (StrippedDest != MI->getArgOperand(0)) {
    Type *SrcETy = cast<PointerType>(StrippedDest->getType())
                     ->getElementType();
    if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
      // The SrcETy might be something like {{{double}}} or [1 x double].  Rip
      // down through these levels if so.
      while (!SrcETy->isSingleValueType()) {
        if (StructType *STy = dyn_cast<StructType>(SrcETy)) {
          if (STy->getNumElements() == 1)
            SrcETy = STy->getElementType(0);
          else
            break;
        } else if (ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
          if (ATy->getNumElements() == 1)
            SrcETy = ATy->getElementType();
          else
            break;
        } else
          break;
      }

      if (SrcETy->isSingleValueType()) {
        NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp);
        NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp);
      }
    }
  }

  // If the memcpy/memmove provides better alignment info than we can
  // infer, use it.
  SrcAlign = std::max(SrcAlign, CopyAlign);
  DstAlign = std::max(DstAlign, CopyAlign);

  Value *Src = Builder->CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
  Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
  LoadInst *L = Builder->CreateLoad(Src, MI->isVolatile());
  L->setAlignment(SrcAlign);
  StoreInst *S = Builder->CreateStore(L, Dest, MI->isVolatile());
  S->setAlignment(DstAlign);

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setArgOperand(2, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}
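// For illustration (hypothetical IR; the concrete value type depends on what
// stripPointerCasts finds), an 8-byte copy such as
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 8,
//                                        i32 8, i1 false)
// is rewritten by SimplifyMemTransfer into
//   %s = bitcast i8* %src to i64*
//   %d = bitcast i8* %dst to i64*
//   %v = load i64* %s, align 8
//   store i64 %v, i64* %d, align 8
// and the intrinsic's length is zeroed so the call is erased next iteration.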
Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
  unsigned Alignment = getKnownAlignment(MI->getDest(), TD);
  if (MI->getAlignment() < Alignment) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      Alignment, false));
    return MI;
  }

  // Extract the length and alignment and fill if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return 0;
  uint64_t Len = LenC->getZExtValue();
  Alignment = MI->getAlignment();

  // If the length is zero, this is a no-op
  if (Len == 0) return MI;  // memset(d,c,0,a) -> noop

  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
    Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
    Dest = Builder->CreateBitCast(Dest, NewDstPtrTy);

    // Alignment 0 is identity for alignment 1 for memset, but not store.
    if (Alignment == 0) Alignment = 1;

    // Extract the fill value and store.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    StoreInst *S = Builder->CreateStore(ConstantInt::get(ITy, Fill), Dest,
                                        MI->isVolatile());
    S->setAlignment(Alignment);

    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return 0;
}
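// For illustration (hypothetical IR): a 4-byte fill such as
//   call void @llvm.memset.p0i8.i64(i8* %p, i8 1, i64 4, i32 4, i1 false)
// becomes
//   %q = bitcast i8* %p to i32*
//   store i32 16843009, i32* %q, align 4    ; 16843009 == 0x01010101
// because the i8 fill value is replicated into every byte of the wider store.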
/// computeAllocSize - compute the object size allocated by an allocation
/// site. Returns 0 if the size is not constant (in SizeValue), 1 if the size
/// is constant (in Size), and 2 if the size could not be determined within the
/// given maximum Penalty that the computation would incur at run-time.
static int computeAllocSize(Value *Alloc, uint64_t &Size, Value* &SizeValue,
                            uint64_t Penalty, TargetData *TD,
                            InstCombiner::BuilderTy *Builder) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Alloc)) {
    if (GV->hasUniqueInitializer()) {
      Constant *C = GV->getInitializer();
      Size = TD->getTypeAllocSize(C->getType());
      return 1;
    }
    // Can't determine size of the GV.
    return 2;

  } else if (AllocaInst *AI = dyn_cast<AllocaInst>(Alloc)) {
    if (!AI->getAllocatedType()->isSized())
      return 2;

    Size = TD->getTypeAllocSize(AI->getAllocatedType());
    if (!AI->isArrayAllocation())
      return 1; // we are done

    Value *ArraySize = AI->getArraySize();
    if (const ConstantInt *C = dyn_cast<ConstantInt>(ArraySize)) {
      Size *= C->getZExtValue();
      return 1;
    }

    if (Penalty < 2)
      return 2;

    SizeValue = Builder->CreateMul(Builder->getInt64(Size), ArraySize);
    return 0;

  } else if (CallInst *MI = extractMallocCall(Alloc)) {
    SizeValue = MI->getArgOperand(0);
    if (ConstantInt *CI = dyn_cast<ConstantInt>(SizeValue)) {
      Size = CI->getZExtValue();
      return 1;
    }
    return 0;

  } else if (CallInst *MI = extractCallocCall(Alloc)) {
    Value *Arg1 = MI->getArgOperand(0);
    Value *Arg2 = MI->getArgOperand(1);
    if (ConstantInt *CI1 = dyn_cast<ConstantInt>(Arg1)) {
      if (ConstantInt *CI2 = dyn_cast<ConstantInt>(Arg2)) {
        Size = (CI1->getValue() * CI2->getValue()).getZExtValue();
        return 1;
      }
    }

    if (Penalty < 2)
      return 2;

    SizeValue = Builder->CreateMul(Arg1, Arg2);
    return 0;
  }

  DEBUG(errs() << "computeAllocSize failed:\n");
  DEBUG(Alloc->dump());
  return 2;
}
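// A sketch of the return convention (hypothetical values): for
// "%buf = alloca [16 x i8]" this returns 1 with Size == 16; for
// "%buf = alloca i32, i32 %n" with Penalty >= 2 it returns 0 and sets
// SizeValue to a freshly built multiply of the element size by the dynamic
// count; and for an external global with no unique initializer it returns 2
// ("don't know").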
/// visitCallInst - CallInst simplification.  This mostly only handles folding
/// of intrinsic instructions.  For normal calls, it allows visitCallSite to do
/// the heavy lifting.
///
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
  if (isFreeCall(&CI))
    return visitFree(CI);
  if (extractMallocCall(&CI) || extractCallocCall(&CI))
    return visitMalloc(CI);

  // If the caller function is nounwind, mark the call as nounwind, even if the
  // callee isn't.
  if (CI.getParent()->getParent()->doesNotThrow() &&
      !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallSite(&CI);

  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue())
        return EraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations.  We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // No other transformations apply to volatile transfers.
    if (MI->isVolatile())
      return 0;

    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getParent()->getParent()->getParent();
          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
          Type *Tys[3] = { CI.getArgOperand(0)->getType(),
                           CI.getArgOperand(1)->getType(),
                           CI.getArgOperand(2)->getType() };
          CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
          Changed = true;
        }
    }

    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return EraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (isa<MemTransferInst>(MI)) {
      if (Instruction *I = SimplifyMemTransfer(MI))
        return I;
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
      if (Instruction *I = SimplifyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }
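  // Illustration of the memmove -> memcpy rewrite above (hypothetical IR):
  // given a constant global
  //   @msg = constant [4 x i8] c"abc\00"
  // the transfer
  //   call void @llvm.memmove.p0i8.p0i8.i32(i8* %d,
  //       i8* getelementptr inbounds ([4 x i8]* @msg, i32 0, i32 0),
  //       i32 4, i32 1, i1 false)
  // is retargeted to @llvm.memcpy.p0i8.p0i8.i32 with the same operands, since
  // a store through %d can never alias read-only memory.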
  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::objectsize: {
    // We need target data for just about everything so depend on it.
    if (!TD) return 0;

    Type *ReturnTy = CI.getType();
    uint64_t Penalty = cast<ConstantInt>(II->getArgOperand(2))->getZExtValue();

    // Get to the real allocated thing and offset as fast as possible.
    Value *Op1 = II->getArgOperand(0)->stripPointerCasts();

    uint64_t Offset = 0;
    Value *OffsetValue;
    bool ConstOffset = true;

    // Try to look through constant GEPs.
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1)) {
      if (!GEP->hasAllConstantIndices()) return 0;

      // Get the current byte offset into the thing.  Use the original
      // operand in case we're looking through a bitcast.
      SmallVector<Value*, 8> Ops(GEP->idx_begin(), GEP->idx_end());
      if (!GEP->getPointerOperandType()->isPointerTy())
        return 0;
      Offset = TD->getIndexedOffset(GEP->getPointerOperandType(), Ops);

      Op1 = GEP->getPointerOperand()->stripPointerCasts();
    }

    uint64_t Size;
    Value *SizeValue;
    int ConstAlloc = computeAllocSize(Op1, Size, SizeValue, Penalty, TD,
                                      Builder);

    // Do not return "I don't know" here.  Later optimization passes could
    // make it possible to evaluate objectsize to a constant.
    if (ConstAlloc == 2)
      return 0;

    if (ConstOffset && ConstAlloc) {
      if (Size < Offset) {
        // Out of bounds
        return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, 0));
      }
      return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, Size-Offset));

    } else if (Penalty >= 2) {
      if (ConstOffset)
        OffsetValue = Builder->getInt64(Offset);
      if (ConstAlloc)
        SizeValue = Builder->getInt64(Size);

      Value *Val = Builder->CreateSub(SizeValue, OffsetValue);
      Val = Builder->CreateTrunc(Val, ReturnTy);
      // return 0 if there's an overflow
      Value *Cmp = Builder->CreateICmpULT(SizeValue, OffsetValue);
      Val = Builder->CreateSelect(Cmp, ConstantInt::get(ReturnTy, 0), Val);
      return ReplaceInstUsesWith(CI, Val);

    } else
      return 0;
  }
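  // Illustrative folding (hypothetical IR, written against this revision's
  // three-operand form of the intrinsic): given
  //   %a = alloca [100 x i8]
  //   %p = getelementptr inbounds [100 x i8]* %a, i32 0, i32 10
  //   %s = call i32 @llvm.objectsize.i32(i8* %p, i1 false, i32 0)
  // both the allocation size and the offset are constant, so %s folds to
  // i32 90.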
  case Intrinsic::bswap:
    // bswap(bswap(x)) -> x
    if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getArgOperand(0)))
      if (Operand->getIntrinsicID() == Intrinsic::bswap)
        return ReplaceInstUsesWith(CI, Operand->getArgOperand(0));

    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
    if (TruncInst *TI = dyn_cast<TruncInst>(II->getArgOperand(0))) {
      if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(TI->getOperand(0)))
        if (Operand->getIntrinsicID() == Intrinsic::bswap) {
          unsigned C = Operand->getType()->getPrimitiveSizeInBits() -
                       TI->getType()->getPrimitiveSizeInBits();
          Value *CV = ConstantInt::get(Operand->getType(), C);
          Value *V = Builder->CreateLShr(Operand->getArgOperand(0), CV);
          return new TruncInst(V, TI->getType());
        }
    }

    break;
  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
      // powi(x, 1) -> x
      if (Power->isOne())
        return ReplaceInstUsesWith(CI, II->getArgOperand(0));
      // powi(x, -1) -> 1/x
      if (Power->isAllOnesValue())
        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
                                          II->getArgOperand(0));
    }
    break;
  case Intrinsic::cttz: {
    // If all bits below the first known one are known zero,
    // this value is constant.
    IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
    // FIXME: Try to simplify vectors of integers.
    if (!IT) break;
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getArgOperand(0), KnownZero, KnownOne);
    unsigned TrailingZeros = KnownOne.countTrailingZeros();
    APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, TrailingZeros)));
  }
  break;
  case Intrinsic::ctlz: {
    // If all bits above the first known one are known zero,
    // this value is constant.
    IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
    // FIXME: Try to simplify vectors of integers.
    if (!IT) break;
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getArgOperand(0), KnownZero, KnownOne);
    unsigned LeadingZeros = KnownOne.countLeadingZeros();
    APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, LeadingZeros)));
  }
  break;
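  // Worked example of the bswap(trunc(bswap(x))) fold earlier in this switch
  // (hypothetical IR): for
  //   %b = call i32 @llvm.bswap.i32(i32 %x)
  //   %t = trunc i32 %b to i16
  //   %r = call i16 @llvm.bswap.i16(i16 %t)
  // the low 16 bits of %b are the byte-swapped high 16 bits of %x, so the
  // whole sequence is just the high half of %x:
  //   %s = lshr i32 %x, 16
  //   %r = trunc i32 %s to i16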
  case Intrinsic::uadd_with_overflow: {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt LHSKnownZero(BitWidth, 0);
    APInt LHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(LHS, LHSKnownZero, LHSKnownOne);
    bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
    bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];

    if (LHSKnownNegative || LHSKnownPositive) {
      APInt RHSKnownZero(BitWidth, 0);
      APInt RHSKnownOne(BitWidth, 0);
      ComputeMaskedBits(RHS, RHSKnownZero, RHSKnownOne);
      bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
      bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
      if (LHSKnownNegative && RHSKnownNegative) {
        // The sign bit is set in both cases: this MUST overflow.
        // Create a simple add instruction, and insert it into the struct.
        Value *Add = Builder->CreateAdd(LHS, RHS);
        Add->takeName(&CI);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getTrue(II->getContext())
        };
        StructType *ST = cast<StructType>(II->getType());
        Constant *Struct = ConstantStruct::get(ST, V);
        return InsertValueInst::Create(Struct, Add, 0);
      }

      if (LHSKnownPositive && RHSKnownPositive) {
        // The sign bit is clear in both cases: this CANNOT overflow.
        // Create a simple add instruction, and insert it into the struct.
        Value *Add = Builder->CreateNUWAdd(LHS, RHS);
        Add->takeName(&CI);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        StructType *ST = cast<StructType>(II->getType());
        Constant *Struct = ConstantStruct::get(ST, V);
        return InsertValueInst::Create(Struct, Add, 0);
      }
    }
  }
  // FALL THROUGH uadd into sadd
  case Intrinsic::sadd_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X + undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X + 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct =
          ConstantStruct::get(cast<StructType>(II->getType()), V);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
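  // Illustration of the uadd case (hypothetical IR): if both %a and %b have a
  // known-clear sign bit (say both were zext'd from i16 to i32), then
  //   %r = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  // cannot wrap, so it is rewritten as
  //   %sum = add nuw i32 %a, %b
  //   %r = insertvalue {i32, i1} {i32 undef, i1 false}, i32 %sum, 0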
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // undef - X -> undef
    // X - undef -> undef
    if (isa<UndefValue>(II->getArgOperand(0)) ||
        isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X - 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct =
          ConstantStruct::get(cast<StructType>(II->getType()), V);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::umul_with_overflow: {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    unsigned BitWidth = cast<IntegerType>(LHS->getType())->getBitWidth();

    APInt LHSKnownZero(BitWidth, 0);
    APInt LHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(LHS, LHSKnownZero, LHSKnownOne);
    APInt RHSKnownZero(BitWidth, 0);
    APInt RHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(RHS, RHSKnownZero, RHSKnownOne);

    // Get the largest possible values for each operand.
    APInt LHSMax = ~LHSKnownZero;
    APInt RHSMax = ~RHSKnownZero;

    // If multiplying the maximum values does not overflow then we can turn
    // this into a plain NUW mul.
    bool Overflow;
    LHSMax.umul_ov(RHSMax, Overflow);
    if (!Overflow) {
      Value *Mul = Builder->CreateNUWMul(LHS, RHS, "umul_with_overflow");
      Constant *V[] = {
        UndefValue::get(LHS->getType()),
        Builder->getFalse()
      };
      Constant *Struct = ConstantStruct::get(cast<StructType>(II->getType()),V);
      return InsertValueInst::Create(Struct, Mul, 0);
    }
  } // FALL THROUGH
  case Intrinsic::smul_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X * undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X*0 -> {0, false}
      if (RHSI->isZero())
        return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));

      // X * 1 -> {X, false}
      if (RHSI->equalsInt(1)) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct =
          ConstantStruct::get(cast<StructType>(II->getType()), V);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
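  // Illustration of the umul case (hypothetical IR): if %a and %b are each
  // known to fit in the low 16 bits of an i32 (their upper bits are known
  // zero), the largest possible product fits in 32 bits, so
  //   %r = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %a, i32 %b)
  // becomes a plain "mul nuw i32 %a, %b" with a constant-false overflow bit.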
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
    // Turn PPC lvx -> load if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                          PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, TD) >= 16) {
      Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(0)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
      return new StoreInst(II->getArgOperand(0), Ptr);
    }
    break;
  case Intrinsic::x86_sse_storeu_ps:
  case Intrinsic::x86_sse2_storeu_pd:
  case Intrinsic::x86_sse2_storeu_dq:
    // Turn X86 storeu -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
      Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(1)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
      return new StoreInst(II->getArgOperand(1), Ptr);
    }
    break;

  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64: {
    // These intrinsics only demand the 0th element of their input vectors. If
    // we can simplify the input based on that, do so now.
    unsigned VWidth =
      cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
    APInt DemandedElts(VWidth, 1);
    APInt UndefElts(VWidth, 0);
    if (Value *V = SimplifyDemandedVectorElts(II->getArgOperand(0),
                                              DemandedElts, UndefElts)) {
      II->setArgOperand(0, V);
      return II;
    }
    break;
  }

  case Intrinsic::x86_sse41_pmovsxbw:
  case Intrinsic::x86_sse41_pmovsxwd:
  case Intrinsic::x86_sse41_pmovsxdq:
  case Intrinsic::x86_sse41_pmovzxbw:
  case Intrinsic::x86_sse41_pmovzxwd:
  case Intrinsic::x86_sse41_pmovzxdq: {
    // pmov{s|z}x ignores the upper half of its input vector.
    unsigned VWidth =
      cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
    unsigned LowHalfElts = VWidth / 2;
    APInt InputDemandedElts(APInt::getBitsSet(VWidth, 0, LowHalfElts));
    APInt UndefElts(VWidth, 0);
    if (Value *TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0),
                                                 InputDemandedElts,
                                                 UndefElts)) {
      II->setArgOperand(0, TmpV);
      return II;
    }
    break;
  }
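  // For example (hypothetical IR): when %p is known 16-byte aligned,
  //   call void @llvm.x86.sse2.storeu.dq(i8* %p, <16 x i8> %v)
  // is replaced by a bitcast of %p to <16 x i8>* followed by an ordinary
  // "store <16 x i8> %v", which later passes can reason about as a normal
  // memory access.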
  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    if (Constant *Mask = dyn_cast<Constant>(II->getArgOperand(2))) {
      assert(Mask->getType()->getVectorNumElements() == 16 &&
             "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        Constant *Elt = Mask->getAggregateElement(i);
        if (Elt == 0 ||
            !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0),
                                            Mask->getType());
        Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1),
                                            Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getAggregateElement(i)))
            continue;
          unsigned Idx =
            cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
          Idx &= 31;  // Match the hardware behavior.

          if (ExtractedElts[Idx] == 0) {
            ExtractedElts[Idx] =
              Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
                                            Builder->getInt32(Idx&15));
          }

          // Insert this value into the result vector.
          Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
                                                Builder->getInt32(i));
        }
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
      }
    }
    break;

  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane:
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), TD);
    unsigned AlignArg = II->getNumArgOperands() - 1;
    ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
    if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
      II->setArgOperand(AlignArg,
                        ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                         MemAlign, false));
      return II;
    }
    break;
  }

  case Intrinsic::arm_neon_vmulls:
  case Intrinsic::arm_neon_vmullu: {
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);

    // Handle mul by zero first:
    if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
      return ReplaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType()));
    }

    // Check for constant LHS & RHS - in this case we just simplify.
    bool Zext = (II->getIntrinsicID() == Intrinsic::arm_neon_vmullu);
    VectorType *NewVT = cast<VectorType>(II->getType());
    unsigned NewWidth = NewVT->getElementType()->getIntegerBitWidth();
    if (ConstantDataVector *CV0 = dyn_cast<ConstantDataVector>(Arg0)) {
      if (ConstantDataVector *CV1 = dyn_cast<ConstantDataVector>(Arg1)) {
        VectorType *VT = cast<VectorType>(CV0->getType());
        SmallVector<Constant*, 4> NewElems;
        for (unsigned i = 0; i < VT->getNumElements(); ++i) {
          APInt CV0E =
            (cast<ConstantInt>(CV0->getAggregateElement(i)))->getValue();
          CV0E = Zext ? CV0E.zext(NewWidth) : CV0E.sext(NewWidth);
          APInt CV1E =
            (cast<ConstantInt>(CV1->getAggregateElement(i)))->getValue();
          CV1E = Zext ? CV1E.zext(NewWidth) : CV1E.sext(NewWidth);
          NewElems.push_back(
            ConstantInt::get(NewVT->getElementType(), CV0E * CV1E));
        }
        return ReplaceInstUsesWith(CI, ConstantVector::get(NewElems));
      }

      // Couldn't simplify - canonicalize constant to the RHS.
      std::swap(Arg0, Arg1);
    }

    // Handle mul by one:
    if (ConstantDataVector *CV1 = dyn_cast<ConstantDataVector>(Arg1)) {
      if (ConstantInt *Splat =
            dyn_cast_or_null<ConstantInt>(CV1->getSplatValue())) {
        if (Splat->isOne()) {
          if (Zext)
            return CastInst::CreateZExtOrBitCast(Arg0, II->getType());
          // else
          return CastInst::CreateSExtOrBitCast(Arg0, II->getType());
        }
      }
    }

    break;
  }
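  // Illustration of the vperm fold (hypothetical IR): with an all-constant
  // 16-byte mask, e.g. one whose entries are <0, 1, ..., 14, 16>, the call
  //   %r = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> %a, <4 x i32> %b,
  //                                               <16 x i8> <mask>)
  // is expanded into explicit extractelement/insertelement operations on the
  // byte vectors (mask entry 16 selecting byte 0 of %b), then bitcast back to
  // the original result type.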
  case Intrinsic::stackrestore: {
    // If the save is right next to the restore, remove the restore.  This can
    // happen when variable allocas are DCE'd.
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        BasicBlock::iterator BI = SS;
        if (&*++BI == II)
          return EraseInstFromFunction(CI);
      }
    }

    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
    BasicBlock::iterator BI = II;
    TerminatorInst *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      if (isa<AllocaInst>(BI) || isMalloc(BI)) {
        CannotRemove = true;
        break;
      }
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
          // If there is a stackrestore below this one, remove this one.
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return EraseInstFromFunction(CI);
          // Otherwise, ignore the intrinsic.
        } else {
          // If we found a non-intrinsic call, we can't remove the stack
          // restore.
          CannotRemove = true;
          break;
        }
      }
    }

    // If the stack restore is in a return or resume block and there are no
    // allocas or calls between the restore and the terminator, nuke the
    // restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
      return EraseInstFromFunction(CI);
    break;
  }
  }

  return visitCallSite(II);
}

// InvokeInst simplification
//
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}

/// isSafeToEliminateVarargsCast - If this cast does not affect the value
/// passed through the varargs area, we can eliminate the use of the cast.
static bool isSafeToEliminateVarargsCast(const CallSite CS,
                                         const CastInst * const CI,
                                         const TargetData * const TD,
                                         const int ix) {
  if (!CI->isLosslessCast())
    return false;

  // The size of ByVal arguments is derived from the type, so we
  // can't change to a type with a different size.  If the size were
  // passed explicitly we could avoid this check.
  if (!CS.isByValArgument(ix))
    return true;

  Type *SrcTy =
    cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  Type *DstTy = cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
    return false;
  return true;
}

namespace {
class InstCombineFortifiedLibCalls : public SimplifyFortifiedLibCalls {
  InstCombiner *IC;
protected:
  void replaceCall(Value *With) {
    NewInstruction = IC->ReplaceInstUsesWith(*CI, With);
  }
  bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp, bool isString) const {
    if (CI->getArgOperand(SizeCIOp) == CI->getArgOperand(SizeArgOp))
      return true;
    if (ConstantInt *SizeCI =
          dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp))) {
      if (SizeCI->isAllOnesValue())
        return true;
      if (isString) {
        uint64_t Len = GetStringLength(CI->getArgOperand(SizeArgOp));
        // If the length is 0 we don't know how long it is and so we can't
        // remove the check.
        if (Len == 0) return false;
        return SizeCI->getZExtValue() >= Len;
      }
      if (ConstantInt *Arg = dyn_cast<ConstantInt>(
            CI->getArgOperand(SizeArgOp)))
        return SizeCI->getZExtValue() >= Arg->getZExtValue();
    }
    return false;
  }
public:
  InstCombineFortifiedLibCalls(InstCombiner *IC) : IC(IC), NewInstruction(0) { }
  Instruction *NewInstruction;
};
} // end anonymous namespace

// Try to fold some different types of calls here.
// Currently we're only working with the checking functions, memcpy_chk,
// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
// strcat_chk and strncat_chk.
Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const TargetData *TD) {
  if (CI->getCalledFunction() == 0) return 0;

  InstCombineFortifiedLibCalls Simplifier(this);
  Simplifier.fold(CI, TD);
  return Simplifier.NewInstruction;
}
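// Illustration of the fortified-call folding (hypothetical C-level view): a
// checked call such as __memcpy_chk(dst, src, 16, 32), where the known object
// size 32 is at least the copy length 16 (or is -1, meaning "unknown, don't
// check"), can be simplified to a plain memcpy; isFoldable above encodes
// exactly that comparison.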
static IntrinsicInst *FindInitTrampolineFromAlloca(Value *TrampMem) {
  // Strip off at most one level of pointer casts, looking for an alloca.  This
  // is good enough in practice and simpler than handling any number of casts.
  Value *Underlying = TrampMem->stripPointerCasts();
  if (Underlying != TrampMem &&
      (!Underlying->hasOneUse() || *Underlying->use_begin() != TrampMem))
    return 0;
  if (!isa<AllocaInst>(Underlying))
    return 0;

  IntrinsicInst *InitTrampoline = 0;
  for (Value::use_iterator I = TrampMem->use_begin(), E = TrampMem->use_end();
       I != E; I++) {
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(*I);
    if (!II)
      return 0;
    if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
      if (InitTrampoline)
        // More than one init_trampoline writes to this value.  Give up.
        return 0;
      InitTrampoline = II;
      continue;
    }
    if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
      // Allow any number of calls to adjust.trampoline.
      continue;
    return 0;
  }

  // No call to init.trampoline found.
  if (!InitTrampoline)
    return 0;

  // Check that the alloca is being used in the expected way.
  if (InitTrampoline->getOperand(0) != TrampMem)
    return 0;

  return InitTrampoline;
}

static IntrinsicInst *FindInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
                                               Value *TrampMem) {
  // Visit all the previous instructions in the basic block, and try to find
  // an init.trampoline that has a direct path to the adjust.trampoline.
  for (BasicBlock::iterator I = AdjustTramp,
         E = AdjustTramp->getParent()->begin(); I != E; ) {
    Instruction *Inst = --I;
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
      if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
          II->getOperand(0) == TrampMem)
        return II;
    if (Inst->mayWriteToMemory())
      return 0;
  }
  return 0;
}

// Given a call to llvm.adjust.trampoline, find and return the corresponding
// call to llvm.init.trampoline if the call to the trampoline can be optimized
// to a direct call to a function.  Otherwise return NULL.
//
static IntrinsicInst *FindInitTrampoline(Value *Callee) {
  Callee = Callee->stripPointerCasts();
  IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
  if (!AdjustTramp ||
      AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
    return 0;

  Value *TrampMem = AdjustTramp->getOperand(0);

  if (IntrinsicInst *IT = FindInitTrampolineFromAlloca(TrampMem))
    return IT;
  if (IntrinsicInst *IT = FindInitTrampolineFromBB(AdjustTramp, TrampMem))
    return IT;
  return 0;
}
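// The pattern these helpers look for, sketched in hypothetical IR:
//   %tramp = alloca [N x i8]                       ; trampoline storage
//   call void @llvm.init.trampoline(i8* %tramp,
//       i8* bitcast (... @f ...), i8* %nest.val)
//   %p = call i8* @llvm.adjust.trampoline(i8* %tramp)
//   ... indirect call through a function pointer derived from %p ...
// Once the matching init.trampoline is found, the indirect call can be turned
// into a direct call to @f with the nest argument spliced in (see
// transformCallThroughTrampoline below).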
// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
  bool Changed = false;

  // If the callee is a pointer to a function, attempt to move any casts to the
  // arguments of the call/invoke.
  Value *Callee = CS.getCalledValue();
  if (!isa<Function>(Callee) && transformConstExprCastCall(CS))
    return 0;

  if (Function *CalleeF = dyn_cast<Function>(Callee))
    // If the call and callee calling conventions don't match, this call must
    // be unreachable, as the call is undefined.
    if (CalleeF->getCallingConv() != CS.getCallingConv() &&
        // Only do this for calls to a function with a body.  A prototype may
        // not actually end up matching the implementation's calling conv for a
        // variety of reasons (e.g. it may be written in assembly).
        !CalleeF->isDeclaration()) {
      Instruction *OldCall = CS.getInstruction();
      new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                    UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                    OldCall);
      // If OldCall does not return void then replaceAllUsesWith undef.
      // This allows ValueHandlers and custom metadata to adjust themselves.
      if (!OldCall->getType()->isVoidTy())
        ReplaceInstUsesWith(*OldCall, UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))
        return EraseInstFromFunction(*OldCall);

      // We cannot remove an invoke, because it would change the CFG, just
      // change the callee to a null pointer.
      cast<InvokeInst>(OldCall)->setCalledFunction(
                                   Constant::getNullValue(CalleeF->getType()));
      return 0;
    }

  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
    // This instruction is not reachable, just remove it.  We insert a store to
    // undef so that we know that this code is not reachable, despite the fact
    // that we can't modify the CFG here.
    new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                  UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                  CS.getInstruction());

    // If CS does not return void then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust themselves.
    if (!CS.getInstruction()->getType()->isVoidTy())
      ReplaceInstUsesWith(*CS.getInstruction(),
                          UndefValue::get(CS.getInstruction()->getType()));

    if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      // Don't break the CFG, insert a dummy cond branch.
      BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
                         ConstantInt::getTrue(Callee->getContext()), II);
    }
    return EraseInstFromFunction(*CS.getInstruction());
  }

  if (IntrinsicInst *II = FindInitTrampoline(Callee))
    return transformCallThroughTrampoline(CS, II);

  PointerType *PTy = cast<PointerType>(Callee->getType());
  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams();
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
    for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
           E = CS.arg_end(); I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }

  // Try to optimize the call if possible; we require TargetData for most of
  // this.  None of these calls are seen as possibly dead so go ahead and
  // delete the instruction now.
  if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
    Instruction *I = tryOptimizeCall(CI, TD);
    // If we changed something, return the result; otherwise fall through to
    // the check below.
    if (I) return EraseInstFromFunction(*I);
  }

  return Changed ? CS.getInstruction() : 0;
}
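// Example of the varargs-cast cleanup above (hypothetical IR): in
//   %c = bitcast i32* %p to i8*
//   call i32 (i8*, ...)* @printf(i8* %fmt, i8* %c)
// the bitcast is lossless and %c is not a byval argument, so %p can be passed
// directly through the varargs area and the cast becomes dead.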
// transformConstExprCastCall - If the callee is a constexpr cast of a function,
// attempt to move the cast to the arguments of the call/invoke.
//
bool InstCombiner::transformConstExprCastCall(CallSite CS) {
  Function *Callee =
    dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
  if (Callee == 0)
    return false;
  Instruction *Caller = CS.getInstruction();
  const AttrListPtr &CallerPAL = CS.getAttributes();

  // Okay, this is a cast from a function to a different type.  Unless doing so
  // would cause a type conversion of one of our arguments, change this call to
  // be a direct call with arguments casted to the appropriate types.
  //
  FunctionType *FT = Callee->getFunctionType();
  Type *OldRetTy = Caller->getType();
  Type *NewRetTy = FT->getReturnType();

  if (NewRetTy->isStructTy())
    return false; // TODO: Handle multiple return values.

  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {
    if (Callee->isDeclaration() &&
        // Conversion is ok if changing from one pointer type to another or
        // from a pointer to an integer of the same size.
        !((OldRetTy->isPointerTy() || !TD ||
           OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
          (NewRetTy->isPointerTy() || !TD ||
           NewRetTy == TD->getIntPtrType(Caller->getContext()))))
      return false;   // Cannot transform this return value.

    if (!Caller->use_empty() &&
        // void -> non-void is handled specially
        !NewRetTy->isVoidTy() && !CastInst::isCastable(NewRetTy, OldRetTy))
      return false;   // Cannot transform this return value.

    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
      Attributes RAttrs = CallerPAL.getRetAttributes();
      if (RAttrs & Attribute::typeIncompatible(NewRetTy))
        return false;   // Attribute not compatible with transformed value.
    }

    // If the callsite is an invoke instruction, and the return value is used
    // by a PHI node in a successor, we cannot change the return type of the
    // call because there is no place to put the cast instruction (without
    // breaking the critical edge).  Bail out in this case.
    if (!Caller->use_empty())
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
        for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
             UI != E; ++UI)
          if (PHINode *PN = dyn_cast<PHINode>(*UI))
            if (PN->getParent() == II->getNormalDest() ||
                PN->getParent() == II->getUnwindDest())
              return false;
  }

  unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  CallSite::arg_iterator AI = CS.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);
    Type *ActTy = (*AI)->getType();

    if (!CastInst::isCastable(ActTy, ParamTy))
      return false;   // Cannot transform this parameter value.

    Attributes Attrs = CallerPAL.getParamAttributes(i + 1);
    if (Attrs & Attribute::typeIncompatible(ParamTy))
      return false;   // Attribute not compatible with transformed value.

    // If the parameter is passed as a byval argument, then we have to have a
    // sized type and the sized type has to have the same size as the old type.
    if (ParamTy != ActTy && (Attrs & Attribute::ByVal)) {
      PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
      if (ParamPTy == 0 || !ParamPTy->getElementType()->isSized() || TD == 0)
        return false;

      Type *CurElTy = cast<PointerType>(ActTy)->getElementType();
      if (TD->getTypeAllocSize(CurElTy) !=
          TD->getTypeAllocSize(ParamPTy->getElementType()))
        return false;
    }

    // Converting from one pointer type to another or between a pointer and an
    // integer of the same size is safe even if we do not have a body.
    bool isConvertible = ActTy == ParamTy ||
      (TD && ((ParamTy->isPointerTy() ||
               ParamTy == TD->getIntPtrType(Caller->getContext())) &&
              (ActTy->isPointerTy() ||
               ActTy == TD->getIntPtrType(Caller->getContext()))));
    if (Callee->isDeclaration() && !isConvertible) return false;
  }

  if (Callee->isDeclaration()) {
    // Do not delete arguments unless we have a function body.
    if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
      return false;

    // If the callee is just a declaration, don't change the varargsness of the
    // call.  We don't want to introduce a varargs call where one doesn't
    // already exist.
    PointerType *APTy = cast<PointerType>(CS.getCalledValue()->getType());
    if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
      return false;

    // If both the callee and the cast type are varargs, we still have to make
    // sure the number of fixed parameters is the same or we have the same
    // ABI issues as if we introduce a varargs call.
    if (FT->isVarArg() &&
        cast<FunctionType>(APTy->getElementType())->isVarArg() &&
        FT->getNumParams() !=
        cast<FunctionType>(APTy->getElementType())->getNumParams())
      return false;
  }

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty())
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them.  Check that these extra arguments have
    // attributes that are compatible with being a vararg call argument.
    for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
      if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
        break;
      Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
      if (PAttrs & Attribute::VarArgsIncompatible)
        return false;
    }

  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary.
  std::vector<Value*> Args;
  Args.reserve(NumActualArgs);
  SmallVector<AttributeWithIndex, 8> attrVec;
  attrVec.reserve(NumCommonArgs);

  // Get any return attributes.
  Attributes RAttrs = CallerPAL.getRetAttributes();

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes.  Wipe out any problematic attributes.
  RAttrs &= ~Attribute::typeIncompatible(NewRetTy);

  // Add the new return attributes.
  if (RAttrs)
    attrVec.push_back(AttributeWithIndex::get(0, RAttrs));
  AI = CS.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);
    if ((*AI)->getType() == ParamTy) {
      Args.push_back(*AI);
    } else {
      Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
          false, ParamTy, false);
      Args.push_back(Builder->CreateCast(opcode, *AI, ParamTy));
    }

    // Add any parameter attributes.
    if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
      attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
  }

  // If the function takes more arguments than the call was taking, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));

  // If we are removing arguments to the function, emit an obnoxious warning.
  if (FT->getNumParams() < NumActualArgs) {
    if (!FT->isVarArg()) {
      errs() << "WARNING: While resolving call to function '"
             << Callee->getName() << "' arguments were dropped!\n";
    } else {
      // Add all of the arguments in their promoted form to the arg list.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        Type *PTy = getPromotedType((*AI)->getType());
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through va_arg area!
          Instruction::CastOps opcode =
            CastInst::getCastOpcode(*AI, false, PTy, false);
          Args.push_back(Builder->CreateCast(opcode, *AI, PTy));
        } else {
          Args.push_back(*AI);
        }

        // Add any parameter attributes.
        if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
          attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
      }
    }
  }

  if (Attributes FnAttrs = CallerPAL.getFnAttributes())
    attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));

  if (NewRetTy->isVoidTy())
    Caller->setName("");   // Void type should not have a name.

  const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),
                                                     attrVec.end());

  Instruction *NC;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NC = Builder->CreateInvoke(Callee, II->getNormalDest(),
                               II->getUnwindDest(), Args);
    NC->takeName(II);
    cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
    cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
  } else {
    CallInst *CI = cast<CallInst>(Caller);
    NC = Builder->CreateCall(Callee, Args);
    NC->takeName(CI);
    if (CI->isTailCall())
      cast<CallInst>(NC)->setTailCall();
    cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
    cast<CallInst>(NC)->setAttributes(NewCallerPAL);
  }

  // Insert a cast of the return type as necessary.
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      Instruction::CastOps opcode =
        CastInst::getCastOpcode(NC, false, OldRetTy, false);
      NV = NC = CastInst::Create(opcode, NC, OldRetTy);
      NC->setDebugLoc(Caller->getDebugLoc());

      // If this is an invoke instruction, we should insert it after the first
      // non-phi instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call, just insert cast right after the call.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.AddUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }

  if (!Caller->use_empty())
    ReplaceInstUsesWith(*Caller, NV);

  EraseInstFromFunction(*Caller);
  return true;
}
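// Illustration of the transform (hypothetical IR): a call through a constexpr
// cast such as
//   %r = call i32 bitcast (i32 (i8*)* @f to i32 (i32*)*)(i32* %p)
// becomes a direct call with the cast moved onto the argument:
//   %a = bitcast i32* %p to i8*
//   %r = call i32 @f(i8* %a)
// which exposes @f to further interprocedural simplification.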
// transformCallThroughTrampoline - Turn a call to a function created by
// init_trampoline / adjust_trampoline intrinsic pair into a direct call to the
// underlying function.
//
Instruction *
InstCombiner::transformCallThroughTrampoline(CallSite CS,
                                             IntrinsicInst *Tramp) {
  Value *Callee = CS.getCalledValue();
  PointerType *PTy = cast<PointerType>(Callee->getType());
  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  const AttrListPtr &Attrs = CS.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return 0;

  assert(Tramp &&
         "transformCallThroughTrampoline called with incorrect CallSite.");

  Function *NestF =cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
  PointerType *NestFPTy = cast<PointerType>(NestF->getType());
  FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());

  const AttrListPtr &NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestIdx = 1;
    Type *NestTy = 0;
    Attributes NestAttr = Attribute::None;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
           E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
      if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = NestAttrs.getParamAttributes(NestIdx);
        break;
      }

    if (NestTy) {
      Instruction *Caller = CS.getInstruction();
      std::vector<Value*> NewArgs;
      NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);

      SmallVector<AttributeWithIndex, 8> NewAttrs;
      NewAttrs.reserve(Attrs.getNumSlots() + 1);

      // Insert the nest argument into the call argument list, which may
      // mean appending it.  Likewise for attributes.

      // Add any result attributes.
      if (Attributes Attr = Attrs.getRetAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(0, Attr));
      {
        unsigned Idx = 1;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        do {
          if (Idx == NestIdx) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp->getArgOperand(2);
            if (NestVal->getType() != NestTy)
              NestVal = Builder->CreateBitCast(NestVal, NestTy, "nest");
            NewArgs.push_back(NestVal);
            NewAttrs.push_back(AttributeWithIndex::get(NestIdx, NestAttr));
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          if (Attributes Attr = Attrs.getParamAttributes(Idx))
            NewAttrs.push_back
              (AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr));

          ++Idx, ++I;
        } while (1);
      }

      // Add any function attributes.
      if (Attributes Attr = Attrs.getFnAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(~0, Attr));

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned Idx = 1;
        FunctionType::param_iterator I = FTy->param_begin(),
          E = FTy->param_end();

        do {
          if (Idx == NestIdx)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++Idx, ++I;
        } while (1);
      }

      // Replace the trampoline call with a direct call.  Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
        NestF->getType() == PointerType::getUnqual(NewFTy) ?
        NestF : ConstantExpr::getBitCast(NestF,
                                         PointerType::getUnqual(NewFTy));
      const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),
                                                   NewAttrs.end());

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        NewCaller = InvokeInst::Create(NewCallee,
                                       II->getNormalDest(), II->getUnwindDest(),
                                       NewArgs);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewCallee, NewArgs);
        if (cast<CallInst>(Caller)->isTailCall())
          cast<CallInst>(NewCaller)->setTailCall();
        cast<CallInst>(NewCaller)->
          setCallingConv(cast<CallInst>(Caller)->getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }

      return NewCaller;
    }
  }
  // Replace the trampoline call with a direct call.  Since there is no 'nest'
  // parameter, there is no need to adjust the argument list.  Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee =
    NestF->getType() == PTy ? NestF :
                              ConstantExpr::getBitCast(NestF, PTy);
  CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}