InstCombineCalls.cpp revision 33591af872045194dc00321041affb92810183b4
//===- InstCombineCalls.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall and visitInvoke functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
using namespace llvm;

/// getPromotedType - Return the specified type promoted as it would be to pass
/// through a va_arg area.
static const Type *getPromotedType(const Type *Ty) {
  if (const IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}

/// EnforceKnownAlignment - If the specified pointer points to an object that
/// we control, modify the object's alignment to PrefAlign. This isn't
/// often possible though. If alignment is important, a more reliable approach
/// is to simply align all global variables and allocation instructions to
/// their preferred alignment from the beginning.
///
static unsigned EnforceKnownAlignment(Value *V,
                                      unsigned Align, unsigned PrefAlign) {

  User *U = dyn_cast<User>(V);
  if (!U) return Align;

  switch (Operator::getOpcode(U)) {
  default: break;
  case Instruction::BitCast:
    return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
  case Instruction::GetElementPtr: {
    // If all indexes are zero, it is just the alignment of the base pointer.
    bool AllZeroOperands = true;
    for (User::op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e; ++i)
      if (!isa<Constant>(*i) ||
          !cast<Constant>(*i)->isNullValue()) {
        AllZeroOperands = false;
        break;
      }

    if (AllZeroOperands) {
      // Treat this like a bitcast.
      return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
    }
    return Align;
  }
  case Instruction::Alloca: {
    AllocaInst *AI = cast<AllocaInst>(V);
    // If there is a requested alignment and if this is an alloca, round up.
    if (AI->getAlignment() >= PrefAlign)
      return AI->getAlignment();
    AI->setAlignment(PrefAlign);
    return PrefAlign;
  }
  }

  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // If there is a large requested alignment and we can, bump up the
    // alignment of the global.
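    // For example, a global defined in this module with no explicit section
    // can simply have its alignment attribute raised to PrefAlign below.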
    if (GV->isDeclaration()) return Align;

    if (GV->getAlignment() >= PrefAlign)
      return GV->getAlignment();
    // We can only increase the alignment of the global if it has no alignment
    // specified or if it is not assigned a section. If it is assigned a
    // section, the global could be densely packed with other objects in the
    // section, and increasing the alignment could cause padding issues.
    if (!GV->hasSection() || GV->getAlignment() == 0)
      GV->setAlignment(PrefAlign);
    return GV->getAlignment();
  }

  return Align;
}

/// GetOrEnforceKnownAlignment - If the specified pointer has an alignment that
/// we can determine, return it, otherwise return 0. If PrefAlign is specified,
/// and it is more than the alignment of the ultimate object, see if we can
/// increase the alignment of the ultimate object, making this check succeed.
unsigned InstCombiner::GetOrEnforceKnownAlignment(Value *V,
                                                  unsigned PrefAlign) {
  assert(V->getType()->isPointerTy() &&
         "GetOrEnforceKnownAlignment expects a pointer!");
  unsigned BitWidth = TD ? TD->getPointerSizeInBits() : 64;
  APInt Mask = APInt::getAllOnesValue(BitWidth);
  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  ComputeMaskedBits(V, Mask, KnownZero, KnownOne);
  unsigned TrailZ = KnownZero.countTrailingOnes();

  // LLVM doesn't support alignments larger than this currently.
  TrailZ = std::min(TrailZ, unsigned(sizeof(unsigned) * CHAR_BIT - 1));

  unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);

  if (PrefAlign > Align)
    Align = EnforceKnownAlignment(V, Align, PrefAlign);

  // Return the alignment we determined (possibly increased above).
  return Align;
}

Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
  unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getArgOperand(0));
  unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getArgOperand(1));
  unsigned MinAlign = std::min(DstAlign, SrcAlign);
  unsigned CopyAlign = MI->getAlignment();

  if (CopyAlign < MinAlign) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      MinAlign, false));
    return MI;
  }

  // If the memcpy/memmove length is 1/2/4/8 bytes, replace it with a
  // load/store pair.
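  // For example (illustrative IR; the i32* bitcasts of %d/%s are created
  // below), a 4-byte copy such as
  //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 4, i32 4,
  //                                        i1 false)
  // can become
  //   %v = load i32* %s.cast, align 4
  //   store i32 %v, i32* %d.cast, align 4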
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getArgOperand(2));
  if (MemOpLength == 0) return 0;

  // Source and destination pointer types are always "i8*" for the intrinsic.
  // See if the size is something we can handle with a single primitive
  // load/store. A single load+store correctly handles overlapping memory in
  // the memmove case.
  unsigned Size = MemOpLength->getZExtValue();
  if (Size == 0) return MI;  // Delete this mem transfer.

  if (Size > 8 || (Size&(Size-1)))
    return 0;  // If not 1/2/4/8 bytes, exit.

  // Use an integer load+store unless we can find something better.
  unsigned SrcAddrSp =
    cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
    cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();

  const IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);

  // Memcpy forces the use of i8* for the source and destination. That means
  // that if you're using memcpy to move one double around, you'll get a cast
  // from double* to i8*. We'd much rather use a double load+store than an i64
  // load+store here, because this improves the odds that the source or dest
  // address will be promotable. See if we can find a better type than the
  // integer datatype.
  Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
  if (StrippedDest != MI->getArgOperand(0)) {
    const Type *SrcETy = cast<PointerType>(StrippedDest->getType())
                                    ->getElementType();
    if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
      // The SrcETy might be something like {{{double}}} or [1 x double]. Rip
      // down through these levels if so.
      while (!SrcETy->isSingleValueType()) {
        if (const StructType *STy = dyn_cast<StructType>(SrcETy)) {
          if (STy->getNumElements() == 1)
            SrcETy = STy->getElementType(0);
          else
            break;
        } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
          if (ATy->getNumElements() == 1)
            SrcETy = ATy->getElementType();
          else
            break;
        } else
          break;
      }

      if (SrcETy->isSingleValueType()) {
        NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp);
        NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp);
      }
    }
  }

  // If the memcpy/memmove provides better alignment info than we can
  // infer, use it.
  SrcAlign = std::max(SrcAlign, CopyAlign);
  DstAlign = std::max(DstAlign, CopyAlign);

  Value *Src = Builder->CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
  Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
  Instruction *L = new LoadInst(Src, "tmp", MI->isVolatile(), SrcAlign);
  InsertNewInstBefore(L, *MI);
  InsertNewInstBefore(new StoreInst(L, Dest, MI->isVolatile(), DstAlign),
                      *MI);

  // Set the size of the copy to 0; it will be deleted on the next iteration.
  MI->setArgOperand(2, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}

Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
  unsigned Alignment = GetOrEnforceKnownAlignment(MI->getDest());
  if (MI->getAlignment() < Alignment) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      Alignment, false));
    return MI;
  }

  // Extract the length and alignment and fill if they are constant.
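  // For example (illustrative IR), a 4-byte memset of the constant byte 1,
  //   call void @llvm.memset.p0i8.i64(i8* %p, i8 1, i64 4, i32 4, i1 false)
  // can become a single store of the splatted fill value:
  //   store i32 16843009, i32* %p.cast, align 4    ; 0x01010101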
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return 0;
  uint64_t Len = LenC->getZExtValue();
  Alignment = MI->getAlignment();

  // If the length is zero, this is a no-op.
  if (Len == 0) return MI;  // memset(d,c,0,a) -> noop

  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    const Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    Dest = Builder->CreateBitCast(Dest, PointerType::getUnqual(ITy));

    // Alignment 0 means the same as alignment 1 for memset, but not for store.
    if (Alignment == 0) Alignment = 1;

    // Extract the fill value and store.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    InsertNewInstBefore(new StoreInst(ConstantInt::get(ITy, Fill),
                                      Dest, false, Alignment), *MI);

    // Set the size of the copy to 0; it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return 0;
}

/// visitCallInst - CallInst simplification. This mostly only handles folding
/// of intrinsic instructions. For normal calls, it allows visitCallSite to do
/// the heavy lifting.
///
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
  if (isFreeCall(&CI))
    return visitFree(CI);
  if (isMalloc(&CI))
    return visitMalloc(CI);

  // If the caller function is nounwind, mark the call as nounwind, even if
  // the callee isn't.
  if (CI.getParent()->getParent()->doesNotThrow() &&
      !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallSite(&CI);

  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue()) return EraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations. We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getParent()->getParent()->getParent();
          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
          const Type *Tys[3] = { CI.getArgOperand(0)->getType(),
                                 CI.getArgOperand(1)->getType(),
                                 CI.getArgOperand(2)->getType() };
          CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys, 3));
          Changed = true;
        }
    }

    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return EraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
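    // For example, a memcpy declared with align 4 whose source and dest can
    // both be proven 16-byte aligned has its alignment argument raised to 16
    // by SimplifyMemTransfer below.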
    if (isa<MemTransferInst>(MI)) {
      if (Instruction *I = SimplifyMemTransfer(MI))
        return I;
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
      if (Instruction *I = SimplifyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }

  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::objectsize: {
    // We need target data for just about everything here, so depend on it.
    if (!TD) break;

    const Type *ReturnTy = CI.getType();
    bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);

    // Get to the real allocated thing and offset as fast as possible.
    Value *Op1 = II->getArgOperand(0)->stripPointerCasts();

    // If we've stripped down to a single global variable whose size we can
    // determine, just return that size.
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1)) {
      if (GV->hasDefinitiveInitializer()) {
        Constant *C = GV->getInitializer();
        uint64_t GlobalSize = TD->getTypeAllocSize(C->getType());
        return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, GlobalSize));
      } else {
        // Can't determine size of the GV.
        Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
        return ReplaceInstUsesWith(CI, RetVal);
      }
    } else if (AllocaInst *AI = dyn_cast<AllocaInst>(Op1)) {
      // Get the alloca size.
      if (AI->getAllocatedType()->isSized()) {
        uint64_t AllocaSize = TD->getTypeAllocSize(AI->getAllocatedType());
        if (AI->isArrayAllocation()) {
          const ConstantInt *C = dyn_cast<ConstantInt>(AI->getArraySize());
          if (!C) break;
          AllocaSize *= C->getZExtValue();
        }
        return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, AllocaSize));
      }
    } else if (CallInst *MI = extractMallocCall(Op1)) {
      const Type* MallocType = getMallocAllocatedType(MI);
      // Get the malloc size.
      if (MallocType && MallocType->isSized()) {
        if (Value *NElems = getMallocArraySize(MI, TD, true)) {
          if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
            return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy,
                (NElements->getZExtValue() *
                 TD->getTypeAllocSize(MallocType))));
        }
      }
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op1)) {
      // Only handle constant GEPs here.
      if (CE->getOpcode() != Instruction::GetElementPtr) break;
      GEPOperator *GEP = cast<GEPOperator>(CE);

      // Make sure we're not a constant offset from an external
      // global.
      Value *Operand = GEP->getPointerOperand();
      Operand = Operand->stripPointerCasts();
      if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Operand))
        if (!GV->hasDefinitiveInitializer()) break;

      // Get what we're pointing to and its size.
      const PointerType *BaseType =
        cast<PointerType>(Operand->getType());
      uint64_t Size = TD->getTypeAllocSize(BaseType->getElementType());

      // Get the current byte offset into the thing. Use the original
      // operand in case we're looking through a bitcast.
      SmallVector<Value*, 8> Ops(CE->op_begin()+1, CE->op_end());
      const PointerType *OffsetType =
        cast<PointerType>(GEP->getPointerOperand()->getType());
      uint64_t Offset = TD->getIndexedOffset(OffsetType, &Ops[0], Ops.size());

      if (Size < Offset) {
        // Out-of-bounds reference? Negative index normalized to a large
        // index? Just return "I don't know".
        Constant *RetVal = ConstantInt::get(ReturnTy, Min ?
                                                      0 : -1ULL);
        return ReplaceInstUsesWith(CI, RetVal);
      }

      Constant *RetVal = ConstantInt::get(ReturnTy, Size-Offset);
      return ReplaceInstUsesWith(CI, RetVal);
    }

    // Do not return "I don't know" here. Later optimization passes could
    // make it possible to evaluate objectsize to a constant.
    break;
  }
  case Intrinsic::bswap:
    // bswap(bswap(x)) -> x
    if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getArgOperand(0)))
      if (Operand->getIntrinsicID() == Intrinsic::bswap)
        return ReplaceInstUsesWith(CI, Operand->getArgOperand(0));

    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
    if (TruncInst *TI = dyn_cast<TruncInst>(II->getArgOperand(0))) {
      if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(TI->getOperand(0)))
        if (Operand->getIntrinsicID() == Intrinsic::bswap) {
          unsigned C = Operand->getType()->getPrimitiveSizeInBits() -
                       TI->getType()->getPrimitiveSizeInBits();
          Value *CV = ConstantInt::get(Operand->getType(), C);
          Value *V = Builder->CreateLShr(Operand->getArgOperand(0), CV);
          return new TruncInst(V, TI->getType());
        }
    }

    break;
  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
      // powi(x, 1) -> x
      if (Power->isOne())
        return ReplaceInstUsesWith(CI, II->getArgOperand(0));
      // powi(x, -1) -> 1/x
      if (Power->isAllOnesValue())
        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
                                          II->getArgOperand(0));
    }
    break;
  case Intrinsic::cttz: {
    // If all bits below the first known one are known zero,
    // this value is constant.
    const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getArgOperand(0), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned TrailingZeros = KnownOne.countTrailingZeros();
    APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, TrailingZeros)));

    }
    break;
  case Intrinsic::ctlz: {
    // If all bits above the first known one are known zero,
    // this value is constant.
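    // For example, if the operand is known to be of the form (x & 15) | 8 in
    // i32, then bit 3 is known one and bits 31..4 are known zero, so the call
    // folds to the constant 28.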
    const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getArgOperand(0), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned LeadingZeros = KnownOne.countLeadingZeros();
    APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, LeadingZeros)));

    }
    break;
  case Intrinsic::uadd_with_overflow: {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt Mask = APInt::getSignBit(BitWidth);
    APInt LHSKnownZero(BitWidth, 0);
    APInt LHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
    bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
    bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];

    if (LHSKnownNegative || LHSKnownPositive) {
      APInt RHSKnownZero(BitWidth, 0);
      APInt RHSKnownOne(BitWidth, 0);
      ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
      bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
      bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
      if (LHSKnownNegative && RHSKnownNegative) {
        // The sign bit is set in both cases: this MUST overflow.
        // Create a simple add instruction, and insert it into the struct.
        Instruction *Add = BinaryOperator::CreateAdd(LHS, RHS, "", &CI);
        Worklist.Add(Add);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getTrue(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, Add, 0);
      }

      if (LHSKnownPositive && RHSKnownPositive) {
        // The sign bit is clear in both cases: this CANNOT overflow.
        // Create a simple add instruction, and insert it into the struct.
        Instruction *Add = BinaryOperator::CreateNUWAdd(LHS, RHS, "", &CI);
        Worklist.Add(Add);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, Add, 0);
      }
    }
  }
  // FALL THROUGH uadd into sadd
  case Intrinsic::sadd_with_overflow:
    // Canonicalize constants into the RHS.
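    // For example, uadd.with.overflow(i32 7, %x) becomes
    // uadd.with.overflow(%x, i32 7), so the folds below only need to look for
    // constants on the RHS.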
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X + undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X + 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getCalledValue()->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // undef - X -> undef
    // X - undef -> undef
    if (isa<UndefValue>(II->getArgOperand(0)) ||
        isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X - 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X * undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X*0 -> {0, false}
      if (RHSI->isZero())
        return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));

      // X * 1 -> {X, false}
      if (RHSI->equalsInt(1)) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::x86_sse_loadu_ps:
  case Intrinsic::x86_sse2_loadu_pd:
  case Intrinsic::x86_sse2_loadu_dq:
    // Turn PPC lvx -> load if the pointer is known aligned.
    // Turn X86 loadups -> load if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getArgOperand(0), 16) >= 16) {
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                        PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
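    // For example (illustrative IR), when %p is known 16-byte aligned,
    //   call void @llvm.ppc.altivec.stvx(<4 x i32> %v, i8* %p)
    // can become an ordinary 'store <4 x i32>' through a bitcast of %p.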
    if (GetOrEnforceKnownAlignment(II->getArgOperand(1), 16) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(0)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
      return new StoreInst(II->getArgOperand(0), Ptr);
    }
    break;
  case Intrinsic::x86_sse_storeu_ps:
  case Intrinsic::x86_sse2_storeu_pd:
  case Intrinsic::x86_sse2_storeu_dq:
    // Turn X86 storeu -> store if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getArgOperand(0), 16) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(1)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
      return new StoreInst(II->getArgOperand(1), Ptr);
    }
    break;

  case Intrinsic::x86_sse_cvttss2si: {
    // This intrinsic only demands the 0th element of its input vector. If
    // we can simplify the input based on that, do so now.
    unsigned VWidth =
      cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
    APInt DemandedElts(VWidth, 1);
    APInt UndefElts(VWidth, 0);
    if (Value *V = SimplifyDemandedVectorElts(II->getArgOperand(0),
                                              DemandedElts, UndefElts)) {
      II->setArgOperand(0, V);
      return II;
    }
    break;
  }

  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getArgOperand(2))) {
      assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        if (!isa<ConstantInt>(Mask->getOperand(i)) &&
            !isa<UndefValue>(Mask->getOperand(i))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0),
                                            Mask->getType());
        Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1),
                                            Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getOperand(i)))
            continue;
          unsigned Idx = cast<ConstantInt>(Mask->getOperand(i))->getZExtValue();
          Idx &= 31;  // Match the hardware behavior.

          if (ExtractedElts[Idx] == 0) {
            ExtractedElts[Idx] =
              Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
                  ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                   Idx&15, false), "tmp");
          }

          // Insert this value into the result vector.
          Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
                       ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                        i, false), "tmp");
        }
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
      }
    }
    break;

  case Intrinsic::stackrestore: {
    // If the save is right next to the restore, remove the restore. This can
    // happen when variable allocas are DCE'd.
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        BasicBlock::iterator BI = SS;
        if (&*++BI == II)
          return EraseInstFromFunction(CI);
      }
    }

    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
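    // A later stackrestore in the same block makes this one dead: its effect
    // is overwritten before any intervening allocation or call could observe
    // it.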
    BasicBlock::iterator BI = II;
    TerminatorInst *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      if (isa<AllocaInst>(BI) || isMalloc(BI)) {
        CannotRemove = true;
        break;
      }
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
          // If there is a stackrestore below this one, remove this one.
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return EraseInstFromFunction(CI);
          // Otherwise, ignore the intrinsic.
        } else {
          // If we found a non-intrinsic call, we can't remove the stack
          // restore.
          CannotRemove = true;
          break;
        }
      }
    }

    // If the stack restore is in a return/unwind block and if there are no
    // allocas or calls between the restore and the return, nuke the restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<UnwindInst>(TI)))
      return EraseInstFromFunction(CI);
    break;
  }
  }

  return visitCallSite(II);
}

// InvokeInst simplification
//
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}

/// isSafeToEliminateVarargsCast - If this cast does not affect the value
/// passed through the varargs area, we can eliminate the use of the cast.
static bool isSafeToEliminateVarargsCast(const CallSite CS,
                                         const CastInst * const CI,
                                         const TargetData * const TD,
                                         const int ix) {
  if (!CI->isLosslessCast())
    return false;

  // The size of ByVal arguments is derived from the type, so we
  // can't change to a type with a different size. If the size were
  // passed explicitly we could avoid this check.
  if (!CS.paramHasAttr(ix, Attribute::ByVal))
    return true;

  const Type* SrcTy =
    cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  const Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
    return false;
  return true;
}

namespace {
class InstCombineFortifiedLibCalls : public SimplifyFortifiedLibCalls {
  InstCombiner *IC;
protected:
  void replaceCall(Value *With) {
    NewInstruction = IC->ReplaceInstUsesWith(*CI, With);
  }
  bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp, bool isString) const {
    if (ConstantInt *SizeCI =
                           dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp))) {
      if (SizeCI->isAllOnesValue())
        return true;
      if (isString)
        return SizeCI->getZExtValue() >=
               GetStringLength(CI->getArgOperand(SizeArgOp));
      if (ConstantInt *Arg = dyn_cast<ConstantInt>(
                                                  CI->getArgOperand(SizeArgOp)))
        return SizeCI->getZExtValue() >= Arg->getZExtValue();
    }
    return false;
  }
public:
  InstCombineFortifiedLibCalls(InstCombiner *IC) : IC(IC), NewInstruction(0) { }
  Instruction *NewInstruction;
};
} // end anonymous namespace

// Try to fold some different types of calls here.
// Currently we're only working with the checking functions: memcpy_chk,
// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
// strcat_chk and strncat_chk.
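// For example (a sketch of the folds SimplifyFortifiedLibCalls performs), a
// call such as __memcpy_chk(dst, src, n, objsize) can be replaced by a plain
// memcpy when objsize is -1 (checking disabled) or when n and objsize are
// constants with objsize >= n.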
Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const TargetData *TD) {
  if (CI->getCalledFunction() == 0) return 0;

  InstCombineFortifiedLibCalls Simplifier(this);
  Simplifier.fold(CI, TD);
  return Simplifier.NewInstruction;
}

// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
  bool Changed = false;

  // If the callee is a constexpr cast of a function, attempt to move the cast
  // to the arguments of the call/invoke.
  if (transformConstExprCastCall(CS)) return 0;

  Value *Callee = CS.getCalledValue();

  if (Function *CalleeF = dyn_cast<Function>(Callee))
    // If the call and callee calling conventions don't match, this call must
    // be unreachable, as the call is undefined.
    if (CalleeF->getCallingConv() != CS.getCallingConv() &&
        // Only do this for calls to a function with a body. A prototype may
        // not actually end up matching the implementation's calling conv for a
        // variety of reasons (e.g. it may be written in assembly).
        !CalleeF->isDeclaration()) {
      Instruction *OldCall = CS.getInstruction();
      new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                                  OldCall);
      // If OldCall does not return void then replaceAllUsesWith undef.
      // This allows ValueHandlers and custom metadata to adjust themselves.
      if (!OldCall->getType()->isVoidTy())
        OldCall->replaceAllUsesWith(UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))
        return EraseInstFromFunction(*OldCall);

      // We cannot remove an invoke, because it would change the CFG; just
      // change the callee to a null pointer.
      cast<InvokeInst>(OldCall)->setCalledFunction(
                                    Constant::getNullValue(CalleeF->getType()));
      return 0;
    }

  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
    // This instruction is not reachable, just remove it. We insert a store to
    // undef so that we know that this code is not reachable, despite the fact
    // that we can't modify the CFG here.
    new StoreInst(ConstantInt::getTrue(Callee->getContext()),
               UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                  CS.getInstruction());

    // If CS does not return void then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust themselves.
    if (!CS.getInstruction()->getType()->isVoidTy())
      CS.getInstruction()->
        replaceAllUsesWith(UndefValue::get(CS.getInstruction()->getType()));

    if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      // Don't break the CFG, insert a dummy cond branch.
      BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
                         ConstantInt::getTrue(Callee->getContext()), II);
    }
    return EraseInstFromFunction(*CS.getInstruction());
  }

  if (BitCastInst *BC = dyn_cast<BitCastInst>(Callee))
    if (IntrinsicInst *In = dyn_cast<IntrinsicInst>(BC->getOperand(0)))
      if (In->getIntrinsicID() == Intrinsic::init_trampoline)
        return transformCallThroughTrampoline(CS);

  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams() + (isa<InvokeInst>(Callee) ? 3 : 1);
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
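    // For example, a front end may emit a lossless bitcast of a pointer
    // argument in the variadic tail (say, i32* to i8*); such a cast can be
    // stripped and the original value passed instead.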
    for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
           E = CS.arg_end(); I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }

  // Try to optimize the call if possible; we require TargetData for most of
  // this. None of these calls are seen as possibly dead, so go ahead and
  // delete the instruction now.
  if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
    Instruction *I = tryOptimizeCall(CI, TD);
    // If we changed something, return the result. Otherwise, let the
    // fall-through checks below run.
    if (I) return EraseInstFromFunction(*I);
  }

  return Changed ? CS.getInstruction() : 0;
}

// transformConstExprCastCall - If the callee is a constexpr cast of a function,
// attempt to move the cast to the arguments of the call/invoke.
//
bool InstCombiner::transformConstExprCastCall(CallSite CS) {
  if (!isa<ConstantExpr>(CS.getCalledValue())) return false;
  ConstantExpr *CE = cast<ConstantExpr>(CS.getCalledValue());
  if (CE->getOpcode() != Instruction::BitCast ||
      !isa<Function>(CE->getOperand(0)))
    return false;
  Function *Callee = cast<Function>(CE->getOperand(0));
  Instruction *Caller = CS.getInstruction();
  const AttrListPtr &CallerPAL = CS.getAttributes();

  // Okay, this is a cast from a function to a different type. Unless doing so
  // would cause a type conversion of one of our arguments, change this call to
  // be a direct call with arguments cast to the appropriate types.
  //
  const FunctionType *FT = Callee->getFunctionType();
  const Type *OldRetTy = Caller->getType();
  const Type *NewRetTy = FT->getReturnType();

  if (NewRetTy->isStructTy())
    return false;  // TODO: Handle multiple return values.

  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {
    if (Callee->isDeclaration() &&
        // Conversion is ok if changing from one pointer type to another or
        // from a pointer to an integer of the same size.
        !((OldRetTy->isPointerTy() || !TD ||
           OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
          (NewRetTy->isPointerTy() || !TD ||
           NewRetTy == TD->getIntPtrType(Caller->getContext()))))
      return false;   // Cannot transform this return value.

    if (!Caller->use_empty() &&
        // void -> non-void is handled specially
        !NewRetTy->isVoidTy() && !CastInst::isCastable(NewRetTy, OldRetTy))
      return false;   // Cannot transform this return value.

    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
      Attributes RAttrs = CallerPAL.getRetAttributes();
      if (RAttrs & Attribute::typeIncompatible(NewRetTy))
        return false;   // Attribute not compatible with transformed value.
    }

    // If the callsite is an invoke instruction, and the return value is used
    // by a PHI node in a successor, we cannot change the return type of the
    // call because there is no place to put the cast instruction (without
    // breaking the critical edge). Bail out in this case.
    if (!Caller->use_empty())
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
        for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
             UI != E; ++UI)
          if (PHINode *PN = dyn_cast<PHINode>(*UI))
            if (PN->getParent() == II->getNormalDest() ||
                PN->getParent() == II->getUnwindDest())
              return false;
  }

  unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  CallSite::arg_iterator AI = CS.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    const Type *ActTy = (*AI)->getType();

    if (!CastInst::isCastable(ActTy, ParamTy))
      return false;   // Cannot transform this parameter value.

    if (CallerPAL.getParamAttributes(i + 1)
        & Attribute::typeIncompatible(ParamTy))
      return false;   // Attribute not compatible with transformed value.

    // Converting from one pointer type to another or between a pointer and an
    // integer of the same size is safe even if we do not have a body.
    bool isConvertible = ActTy == ParamTy ||
      (TD && ((ParamTy->isPointerTy() ||
               ParamTy == TD->getIntPtrType(Caller->getContext())) &&
              (ActTy->isPointerTy() ||
               ActTy == TD->getIntPtrType(Caller->getContext()))));
    if (Callee->isDeclaration() && !isConvertible) return false;
  }

  if (FT->getNumParams() < NumActualArgs && !FT->isVarArg() &&
      Callee->isDeclaration())
    return false;   // Do not delete arguments unless we have a function body.

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty())
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them. Check that these extra arguments have attributes
    // that are compatible with being a vararg call argument.
    for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
      if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
        break;
      Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
      if (PAttrs & Attribute::VarArgsIncompatible)
        return false;
    }

  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary...
  std::vector<Value*> Args;
  Args.reserve(NumActualArgs);
  SmallVector<AttributeWithIndex, 8> attrVec;
  attrVec.reserve(NumCommonArgs);

  // Get any return attributes.
  Attributes RAttrs = CallerPAL.getRetAttributes();

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes. Wipe out any problematic attributes.
  RAttrs &= ~Attribute::typeIncompatible(NewRetTy);

  // Add the new return attributes.
  if (RAttrs)
    attrVec.push_back(AttributeWithIndex::get(0, RAttrs));

  AI = CS.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    if ((*AI)->getType() == ParamTy) {
      Args.push_back(*AI);
    } else {
      Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
          false, ParamTy, false);
      Args.push_back(Builder->CreateCast(opcode, *AI, ParamTy, "tmp"));
    }

    // Add any parameter attributes.
    if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
      attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
  }

  // If the function takes more arguments than the call was taking, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));

  // If we are removing arguments to the function, emit an obnoxious warning.
  if (FT->getNumParams() < NumActualArgs) {
    if (!FT->isVarArg()) {
      errs() << "WARNING: While resolving call to function '"
             << Callee->getName() << "' arguments were dropped!\n";
    } else {
      // Add all of the arguments in their promoted form to the arg list.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        const Type *PTy = getPromotedType((*AI)->getType());
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through va_arg area!
          Instruction::CastOps opcode =
            CastInst::getCastOpcode(*AI, false, PTy, false);
          Args.push_back(Builder->CreateCast(opcode, *AI, PTy, "tmp"));
        } else {
          Args.push_back(*AI);
        }

        // Add any parameter attributes.
        if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
          attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
      }
    }
  }

  if (Attributes FnAttrs = CallerPAL.getFnAttributes())
    attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));

  if (NewRetTy->isVoidTy())
    Caller->setName("");   // Void type should not have a name.

  const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),
                                                     attrVec.end());

  Instruction *NC;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NC = InvokeInst::Create(Callee, II->getNormalDest(), II->getUnwindDest(),
                            Args.begin(), Args.end(),
                            Caller->getName(), Caller);
    cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
    cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
  } else {
    NC = CallInst::Create(Callee, Args.begin(), Args.end(),
                          Caller->getName(), Caller);
    CallInst *CI = cast<CallInst>(Caller);
    if (CI->isTailCall())
      cast<CallInst>(NC)->setTailCall();
    cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
    cast<CallInst>(NC)->setAttributes(NewCallerPAL);
  }

  // Insert a cast of the return type as necessary.
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      Instruction::CastOps opcode = CastInst::getCastOpcode(NC, false,
                                                            OldRetTy, false);
      NV = NC = CastInst::Create(opcode, NC, OldRetTy, "tmp");

      // If this is an invoke instruction, we should insert it after the first
      // non-phi instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstNonPHI();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call; just insert the cast right after the call
        // instruction.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.AddUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }


  if (!Caller->use_empty())
    Caller->replaceAllUsesWith(NV);

  EraseInstFromFunction(*Caller);
  return true;
}

// transformCallThroughTrampoline - Turn a call to a function created by the
// init_trampoline intrinsic into a direct call to the underlying function.
//
Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
  Value *Callee = CS.getCalledValue();
  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  const AttrListPtr &Attrs = CS.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return 0;

  IntrinsicInst *Tramp =
    cast<IntrinsicInst>(cast<BitCastInst>(Callee)->getOperand(0));

  Function *NestF =
    cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
  const PointerType *NestFPTy = cast<PointerType>(NestF->getType());
  const FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());

  const AttrListPtr &NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestIdx = 1;
    const Type *NestTy = 0;
    Attributes NestAttr = Attribute::None;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
         E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
      if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = NestAttrs.getParamAttributes(NestIdx);
        break;
      }

    if (NestTy) {
      Instruction *Caller = CS.getInstruction();
      std::vector<Value*> NewArgs;
      NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);

      SmallVector<AttributeWithIndex, 8> NewAttrs;
      NewAttrs.reserve(Attrs.getNumSlots() + 1);

      // Insert the nest argument into the call argument list, which may
      // mean appending it. Likewise for attributes.

      // Add any result attributes.
      if (Attributes Attr = Attrs.getRetAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(0, Attr));

      {
        unsigned Idx = 1;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        do {
          if (Idx == NestIdx) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp->getArgOperand(2);
            if (NestVal->getType() != NestTy)
              NestVal = new BitCastInst(NestVal, NestTy, "nest", Caller);
            NewArgs.push_back(NestVal);
            NewAttrs.push_back(AttributeWithIndex::get(NestIdx, NestAttr));
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          if (Attributes Attr = Attrs.getParamAttributes(Idx))
            NewAttrs.push_back(
              AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr));

          ++Idx, ++I;
        } while (1);
      }

      // Add any function attributes.
      if (Attributes Attr = Attrs.getFnAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(~0, Attr));

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<const Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned Idx = 1;
        FunctionType::param_iterator I = FTy->param_begin(),
          E = FTy->param_end();

        do {
          if (Idx == NestIdx)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++Idx, ++I;
        } while (1);
      }

      // Replace the trampoline call with a direct call. Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
        NestF->getType() == PointerType::getUnqual(NewFTy) ?
        NestF : ConstantExpr::getBitCast(NestF,
                                         PointerType::getUnqual(NewFTy));
      const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),
                                                   NewAttrs.end());

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        NewCaller = InvokeInst::Create(NewCallee,
                                       II->getNormalDest(), II->getUnwindDest(),
                                       NewArgs.begin(), NewArgs.end(),
                                       Caller->getName(), Caller);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewCallee, NewArgs.begin(), NewArgs.end(),
                                     Caller->getName(), Caller);
        if (cast<CallInst>(Caller)->isTailCall())
          cast<CallInst>(NewCaller)->setTailCall();
        cast<CallInst>(NewCaller)->
          setCallingConv(cast<CallInst>(Caller)->getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }
      if (!Caller->getType()->isVoidTy())
        Caller->replaceAllUsesWith(NewCaller);
      Caller->eraseFromParent();
      Worklist.Remove(Caller);
      return 0;
    }
  }

  // Replace the trampoline call with a direct call. Since there is no 'nest'
  // parameter, there is no need to adjust the argument list. Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee =
    NestF->getType() == PTy ? NestF :
                              ConstantExpr::getBitCast(NestF, PTy);
  CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}