InstCombineCalls.cpp revision 415326b4edcc967dfb03c5ab41923b195e7c3cb1
//===- InstCombineCalls.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall and visitInvoke functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Analysis/MemoryBuiltins.h"
using namespace llvm;

/// getPromotedType - Return the specified type promoted as it would be to pass
/// through a va_arg area.
static const Type *getPromotedType(const Type *Ty) {
  if (const IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}

/// EnforceKnownAlignment - If the specified pointer points to an object that
/// we control, modify the object's alignment to PrefAlign. This isn't
/// often possible though. If alignment is important, a more reliable approach
/// is to simply align all global variables and allocation instructions to
/// their preferred alignment from the beginning.
///
static unsigned EnforceKnownAlignment(Value *V,
                                      unsigned Align, unsigned PrefAlign) {

  User *U = dyn_cast<User>(V);
  if (!U) return Align;

  switch (Operator::getOpcode(U)) {
  default: break;
  case Instruction::BitCast:
    return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
  case Instruction::GetElementPtr: {
    // If all indexes are zero, it is just the alignment of the base pointer.
    bool AllZeroOperands = true;
    for (User::op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e; ++i)
      if (!isa<Constant>(*i) ||
          !cast<Constant>(*i)->isNullValue()) {
        AllZeroOperands = false;
        break;
      }

    if (AllZeroOperands) {
      // Treat this like a bitcast.
      return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
    }
    break;
  }
  }

  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // If there is a large requested alignment and we can, bump up the
    // alignment of the global.
    if (!GV->isDeclaration()) {
      if (GV->getAlignment() >= PrefAlign)
        Align = GV->getAlignment();
      else {
        GV->setAlignment(PrefAlign);
        Align = PrefAlign;
      }
    }
  } else if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    // If there is a requested alignment and if this is an alloca, round up.
    if (AI->getAlignment() >= PrefAlign)
      Align = AI->getAlignment();
    else {
      AI->setAlignment(PrefAlign);
      Align = PrefAlign;
    }
  }

  return Align;
}

/// GetOrEnforceKnownAlignment - If the specified pointer has an alignment that
/// we can determine, return it, otherwise return 0.  If PrefAlign is
/// specified, and it is more than the alignment of the ultimate object, see
/// if we can increase the alignment of the ultimate object, making this check
/// succeed.
unsigned InstCombiner::GetOrEnforceKnownAlignment(Value *V,
                                                  unsigned PrefAlign) {
  unsigned BitWidth = TD ? TD->getTypeSizeInBits(V->getType()) :
                           sizeof(PrefAlign) * CHAR_BIT;
  APInt Mask = APInt::getAllOnesValue(BitWidth);
  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  ComputeMaskedBits(V, Mask, KnownZero, KnownOne);
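  // Trailing zero bits of the value give its alignment directly: a pointer
  // with N low bits known to be zero is 2^N-byte aligned (e.g. four known
  // zero bits imply 16-byte alignment).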
  unsigned TrailZ = KnownZero.countTrailingOnes();
  unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);

  if (PrefAlign > Align)
    Align = EnforceKnownAlignment(V, Align, PrefAlign);

  // We don't need to make any adjustment.
  return Align;
}

Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
  unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getOperand(1));
  unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getOperand(2));
  unsigned MinAlign = std::min(DstAlign, SrcAlign);
  unsigned CopyAlign = MI->getAlignment();

  if (CopyAlign < MinAlign) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      MinAlign, false));
    return MI;
  }

  // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
  // load/store.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getOperand(3));
  if (MemOpLength == 0) return 0;

  // Source and destination pointer types are always "i8*" for intrinsic.  See
  // if the size is something we can handle with a single primitive load/store.
  // A single load+store correctly handles overlapping memory in the memmove
  // case.
  unsigned Size = MemOpLength->getZExtValue();
  if (Size == 0) return MI;  // Delete this mem transfer.

  if (Size > 8 || (Size&(Size-1)))
    return 0;  // If not 1/2/4/8 bytes, exit.

  // Use an integer load+store unless we can find something better.
  Type *NewPtrTy =
    PointerType::getUnqual(IntegerType::get(MI->getContext(), Size<<3));

  // Memcpy forces the use of i8* for the source and destination.  That means
  // that if you're using memcpy to move one double around, you'll get a cast
  // from double* to i8*.  We'd much rather use a double load+store than an
  // i64 load+store here, because this improves the odds that the source or
  // dest address will be promotable.  See if we can find a better type than
  // the integer datatype.
  Value *StrippedDest = MI->getOperand(1)->stripPointerCasts();
  if (StrippedDest != MI->getOperand(1)) {
    const Type *SrcETy = cast<PointerType>(StrippedDest->getType())
                           ->getElementType();
    if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
      // The SrcETy might be something like {{{double}}} or [1 x double].  Rip
      // down through these levels if so.
      while (!SrcETy->isSingleValueType()) {
        if (const StructType *STy = dyn_cast<StructType>(SrcETy)) {
          if (STy->getNumElements() == 1)
            SrcETy = STy->getElementType(0);
          else
            break;
        } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
          if (ATy->getNumElements() == 1)
            SrcETy = ATy->getElementType();
          else
            break;
        } else
          break;
      }

      if (SrcETy->isSingleValueType())
        NewPtrTy = PointerType::getUnqual(SrcETy);
    }
  }

  // If the memcpy/memmove provides better alignment info than we can
  // infer, use it.
  SrcAlign = std::max(SrcAlign, CopyAlign);
  DstAlign = std::max(DstAlign, CopyAlign);

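  // For an 8-byte copy where no better element type was found, the result is
  // a single integer load/store pair along the lines of:
  //   %tmp = load i64* %src, align <SrcAlign>
  //   store i64 %tmp, i64* %dst, align <DstAlign>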
  Value *Src = Builder->CreateBitCast(MI->getOperand(2), NewPtrTy);
  Value *Dest = Builder->CreateBitCast(MI->getOperand(1), NewPtrTy);
  Instruction *L = new LoadInst(Src, "tmp", false, SrcAlign);
  InsertNewInstBefore(L, *MI);
  InsertNewInstBefore(new StoreInst(L, Dest, false, DstAlign), *MI);

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setOperand(3, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}

Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
  unsigned Alignment = GetOrEnforceKnownAlignment(MI->getDest());
  if (MI->getAlignment() < Alignment) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      Alignment, false));
    return MI;
  }

  // Extract the length and alignment and fill if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isInteger(8))
    return 0;
  uint64_t Len = LenC->getZExtValue();
  Alignment = MI->getAlignment();

  // If the length is zero, this is a no-op.
  if (Len == 0) return MI;  // memset(d,c,0,a) -> noop

  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    const Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    Dest = Builder->CreateBitCast(Dest, PointerType::getUnqual(ITy));

    // Alignment 0 is identity for alignment 1 for memset, but not store.
    if (Alignment == 0) Alignment = 1;
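    // Multiplying the zero-extended fill byte by 0x0101010101010101 splats it
    // into every byte position (e.g. 0xAB -> 0xABABABABABABABAB);
    // ConstantInt::get then truncates the splat to the Len*8-bit store type.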
    // Extract the fill value and store.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    InsertNewInstBefore(new StoreInst(ConstantInt::get(ITy, Fill),
                                      Dest, false, Alignment), *MI);

    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return 0;
}

/// visitCallInst - CallInst simplification.  This mostly only handles folding
/// of intrinsic instructions.  For normal calls, it allows visitCallSite to do
/// the heavy lifting.
///
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
  if (isFreeCall(&CI))
    return visitFree(CI);

  // If the caller function is nounwind, mark the call as nounwind, even if
  // the callee isn't.
  if (CI.getParent()->getParent()->doesNotThrow() &&
      !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallSite(&CI);

  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue()) return EraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations.  We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getParent()->getParent()->getParent();
          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
          const Type *Tys[1];
          Tys[0] = CI.getOperand(3)->getType();
          CI.setOperand(0,
                        Intrinsic::getDeclaration(M, MemCpyID, Tys, 1));
          Changed = true;
        }
    }

    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return EraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (isa<MemTransferInst>(MI)) {
      if (Instruction *I = SimplifyMemTransfer(MI))
        return I;
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
      if (Instruction *I = SimplifyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }

  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::objectsize: {
    const Type *ReturnTy = CI.getType();
    Value *Op1 = II->getOperand(1);
    bool Min = (cast<ConstantInt>(II->getOperand(2))->getZExtValue() == 1);

    if (!TD) break;
    Op1 = Op1->stripPointerCasts();

    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1)) {
      if (GV->hasDefinitiveInitializer()) {
        Constant *C = GV->getInitializer();
        size_t globalSize = TD->getTypeAllocSize(C->getType());
        return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, globalSize));
      } else {
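        // The size is unknown at compile time; objectsize is defined to fold
        // to 0 when the minimum is requested and to -1 (all ones) when the
        // maximum is requested in that case.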
        Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
        return ReplaceInstUsesWith(CI, RetVal);
      }
    }
    break;
  }
  case Intrinsic::bswap:
    // bswap(bswap(x)) -> x
    if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getOperand(1)))
      if (Operand->getIntrinsicID() == Intrinsic::bswap)
        return ReplaceInstUsesWith(CI, Operand->getOperand(1));

    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
    if (TruncInst *TI = dyn_cast<TruncInst>(II->getOperand(1))) {
      if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(TI->getOperand(0)))
        if (Operand->getIntrinsicID() == Intrinsic::bswap) {
          unsigned C = Operand->getType()->getPrimitiveSizeInBits() -
                       TI->getType()->getPrimitiveSizeInBits();
          Value *CV = ConstantInt::get(Operand->getType(), C);
          Value *V = Builder->CreateLShr(Operand->getOperand(1), CV);
          return new TruncInst(V, TI->getType());
        }
    }

    break;
  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getOperand(2))) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
      // powi(x, 1) -> x
      if (Power->isOne())
        return ReplaceInstUsesWith(CI, II->getOperand(1));
      // powi(x, -1) -> 1/x
      if (Power->isAllOnesValue())
        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
                                          II->getOperand(1));
    }
    break;
  case Intrinsic::cttz: {
    // If all bits below the first known one are known zero,
    // this value is constant.
    const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getOperand(1), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned TrailingZeros = KnownOne.countTrailingZeros();
    APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                     APInt(BitWidth, TrailingZeros)));
  }
  break;
  case Intrinsic::ctlz: {
    // If all bits above the first known one are known zero,
    // this value is constant.
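    // For example, an i8 value with bit 4 known one and bits 5-7 known zero
    // has exactly three leading zeros no matter what the low bits are.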
    const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getOperand(1), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned LeadingZeros = KnownOne.countLeadingZeros();
    APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                     APInt(BitWidth, LeadingZeros)));
  }
  break;
  case Intrinsic::uadd_with_overflow: {
    Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
    const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt Mask = APInt::getSignBit(BitWidth);
    APInt LHSKnownZero(BitWidth, 0);
    APInt LHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
    bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
    bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];

    if (LHSKnownNegative || LHSKnownPositive) {
      APInt RHSKnownZero(BitWidth, 0);
      APInt RHSKnownOne(BitWidth, 0);
      ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
      bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
      bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
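      // If both sign bits are known set, each operand is at least
      // 2^(BitWidth-1), so the unsigned sum is at least 2^BitWidth and must
      // wrap; if both are known clear, each operand is below 2^(BitWidth-1)
      // and the sum cannot wrap.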
      if (LHSKnownNegative && RHSKnownNegative) {
        // The sign bit is set in both cases: this MUST overflow.
        // Create a simple add instruction, and insert it into the struct.
        Instruction *Add = BinaryOperator::CreateAdd(LHS, RHS, "", &CI);
        Worklist.Add(Add);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getTrue(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, Add, 0);
      }

      if (LHSKnownPositive && RHSKnownPositive) {
        // The sign bit is clear in both cases: this CANNOT overflow.
        // Create a simple add instruction, and insert it into the struct.
        Instruction *Add = BinaryOperator::CreateNUWAdd(LHS, RHS, "", &CI);
        Worklist.Add(Add);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, Add, 0);
      }
    }
  }
  // FALL THROUGH uadd into sadd
  case Intrinsic::sadd_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getOperand(1)) &&
        !isa<Constant>(II->getOperand(2))) {
      Value *LHS = II->getOperand(1);
      II->setOperand(1, II->getOperand(2));
      II->setOperand(2, LHS);
      return II;
    }

    // X + undef -> undef
    if (isa<UndefValue>(II->getOperand(2)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getOperand(2))) {
      // X + 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getOperand(1)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getOperand(1), 0);
      }
    }
    break;
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // undef - X -> undef
    // X - undef -> undef
    if (isa<UndefValue>(II->getOperand(1)) ||
        isa<UndefValue>(II->getOperand(2)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getOperand(2))) {
      // X - 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getOperand(1)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getOperand(1), 0);
      }
    }
    break;
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getOperand(1)) &&
        !isa<Constant>(II->getOperand(2))) {
      Value *LHS = II->getOperand(1);
      II->setOperand(1, II->getOperand(2));
      II->setOperand(2, LHS);
      return II;
    }

    // X * undef -> undef
    if (isa<UndefValue>(II->getOperand(2)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getOperand(2))) {
      // X*0 -> {0, false}
      if (RHSI->isZero())
        return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));

      // X * 1 -> {X, false}
      if (RHSI->equalsInt(1)) {
        Constant *V[] = {
          UndefValue::get(II->getOperand(1)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getOperand(1), 0);
      }
    }
    break;
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::x86_sse_loadu_ps:
  case Intrinsic::x86_sse2_loadu_pd:
  case Intrinsic::x86_sse2_loadu_dq:
    // Turn PPC lvx -> load if the pointer is known aligned.
    // Turn X86 loadups -> load if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
      Value *Ptr = Builder->CreateBitCast(II->getOperand(1),
                                       PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
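    // (16 bytes is the size and natural alignment of the 128-bit vector
    // types these intrinsics operate on.)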
    if (GetOrEnforceKnownAlignment(II->getOperand(2), 16) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getOperand(1)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getOperand(2), OpPtrTy);
      return new StoreInst(II->getOperand(1), Ptr);
    }
    break;
  case Intrinsic::x86_sse_storeu_ps:
  case Intrinsic::x86_sse2_storeu_pd:
  case Intrinsic::x86_sse2_storeu_dq:
    // Turn X86 storeu -> store if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getOperand(2)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getOperand(1), OpPtrTy);
      return new StoreInst(II->getOperand(2), Ptr);
    }
    break;

  case Intrinsic::x86_sse_cvttss2si: {
    // This intrinsic only demands the 0th element of its input vector.  If
    // we can simplify the input based on that, do so now.
    unsigned VWidth =
      cast<VectorType>(II->getOperand(1)->getType())->getNumElements();
    APInt DemandedElts(VWidth, 1);
    APInt UndefElts(VWidth, 0);
    if (Value *V = SimplifyDemandedVectorElts(II->getOperand(1), DemandedElts,
                                              UndefElts)) {
      II->setOperand(1, V);
      return II;
    }
    break;
  }

  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getOperand(3))) {
      assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        if (!isa<ConstantInt>(Mask->getOperand(i)) &&
            !isa<UndefValue>(Mask->getOperand(i))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 = Builder->CreateBitCast(II->getOperand(1), Mask->getType());
        Value *Op1 = Builder->CreateBitCast(II->getOperand(2), Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

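        // Each mask byte selects one byte from the 32-byte concatenation of
        // the two source vectors: indices 0-15 read from Op0 and 16-31 from
        // Op1, matching the hardware's modulo-32 indexing.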
        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getOperand(i)))
            continue;
          unsigned Idx = cast<ConstantInt>(Mask->getOperand(i))->getZExtValue();
          Idx &= 31;  // Match the hardware behavior.

          if (ExtractedElts[Idx] == 0) {
            ExtractedElts[Idx] =
              Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
                  ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                   Idx&15, false), "tmp");
          }

          // Insert this value into the result vector.
          Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
                       ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                        i, false), "tmp");
        }
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
      }
    }
    break;

  case Intrinsic::stackrestore: {
    // If the save is right next to the restore, remove the restore.  This can
    // happen when variable allocas are DCE'd.
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getOperand(1))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        BasicBlock::iterator BI = SS;
        if (&*++BI == II)
          return EraseInstFromFunction(CI);
      }
    }

    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
    BasicBlock::iterator BI = II;
    TerminatorInst *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      if (isa<AllocaInst>(BI) || isMalloc(BI)) {
        CannotRemove = true;
        break;
      }
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
          // If there is a stackrestore below this one, remove this one.
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return EraseInstFromFunction(CI);
          // Otherwise, ignore the intrinsic.
        } else {
          // If we found a non-intrinsic call, we can't remove the stack
          // restore.
          CannotRemove = true;
          break;
        }
      }
    }

    // If the stack restore is in a return/unwind block and if there are no
    // allocas or calls between the restore and the return, nuke the restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<UnwindInst>(TI)))
      return EraseInstFromFunction(CI);
    break;
  }
  }

  return visitCallSite(II);
}

// InvokeInst simplification
//
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}

/// isSafeToEliminateVarargsCast - If this cast does not affect the value
/// passed through the varargs area, we can eliminate the use of the cast.
static bool isSafeToEliminateVarargsCast(const CallSite CS,
                                         const CastInst * const CI,
                                         const TargetData * const TD,
                                         const int ix) {
  if (!CI->isLosslessCast())
    return false;

  // The size of ByVal arguments is derived from the type, so we
  // can't change to a type with a different size.  If the size were
  // passed explicitly we could avoid this check.
  if (!CS.paramHasAttr(ix, Attribute::ByVal))
    return true;

  const Type* SrcTy =
    cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  const Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
    return false;
  return true;
}

// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
  bool Changed = false;

  // If the callee is a constexpr cast of a function, attempt to move the cast
  // to the arguments of the call/invoke.
  if (transformConstExprCastCall(CS)) return 0;

  Value *Callee = CS.getCalledValue();

  if (Function *CalleeF = dyn_cast<Function>(Callee))
    // If the call and callee calling conventions don't match, this call must
    // be unreachable, as the call is undefined.
    if (CalleeF->getCallingConv() != CS.getCallingConv() &&
        // Only do this for calls to a function with a body.  A prototype may
        // not actually end up matching the implementation's calling conv for
        // a variety of reasons (e.g. it may be written in assembly).
        !CalleeF->isDeclaration()) {
      Instruction *OldCall = CS.getInstruction();
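      // Flag the spot as unreachable with a store to undef, since the CFG
      // can't be modified from within instcombine.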
      new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                                  OldCall);
      // If OldCall does not return void then replaceAllUsesWith undef.
      // This allows ValueHandlers and custom metadata to adjust themselves.
      if (!OldCall->getType()->isVoidTy())
        OldCall->replaceAllUsesWith(UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))
        return EraseInstFromFunction(*OldCall);

      // We cannot remove an invoke, because it would change the CFG, just
      // change the callee to a null pointer.
      cast<InvokeInst>(OldCall)->setOperand(0,
                                    Constant::getNullValue(CalleeF->getType()));
      return 0;
    }

  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
    // This instruction is not reachable, just remove it.  We insert a store to
    // undef so that we know that this code is not reachable, despite the fact
    // that we can't modify the CFG here.
    new StoreInst(ConstantInt::getTrue(Callee->getContext()),
               UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                  CS.getInstruction());

    // If CS does not return void then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust themselves.
    if (!CS.getInstruction()->getType()->isVoidTy())
      CS.getInstruction()->
        replaceAllUsesWith(UndefValue::get(CS.getInstruction()->getType()));

    if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      // Don't break the CFG, insert a dummy cond branch.
      BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
                         ConstantInt::getTrue(Callee->getContext()), II);
    }
    return EraseInstFromFunction(*CS.getInstruction());
  }

  if (BitCastInst *BC = dyn_cast<BitCastInst>(Callee))
    if (IntrinsicInst *In = dyn_cast<IntrinsicInst>(BC->getOperand(0)))
      if (In->getIntrinsicID() == Intrinsic::init_trampoline)
        return transformCallThroughTrampoline(CS);

  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams() + (isa<InvokeInst>(Callee) ? 3 : 1);
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
    for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
           E = CS.arg_end(); I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }

  return Changed ? CS.getInstruction() : 0;
}

// transformConstExprCastCall - If the callee is a constexpr cast of a
// function, attempt to move the cast to the arguments of the call/invoke.
//
bool InstCombiner::transformConstExprCastCall(CallSite CS) {
  if (!isa<ConstantExpr>(CS.getCalledValue())) return false;
  ConstantExpr *CE = cast<ConstantExpr>(CS.getCalledValue());
  if (CE->getOpcode() != Instruction::BitCast ||
      !isa<Function>(CE->getOperand(0)))
    return false;
  Function *Callee = cast<Function>(CE->getOperand(0));
  Instruction *Caller = CS.getInstruction();
  const AttrListPtr &CallerPAL = CS.getAttributes();

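  // As a sketch of the transform: a call through
  // "bitcast (i32 (i32)* @f to i32 (i8*)*)" can become a direct call to @f,
  // with the i8* argument converted to i32, provided TargetData says pointers
  // and i32 have the same size.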
  // Okay, this is a cast from a function to a different type.  Unless doing
  // so would cause a type conversion of one of our arguments, change this
  // call to be a direct call with arguments casted to the appropriate types.
  //
  const FunctionType *FT = Callee->getFunctionType();
  const Type *OldRetTy = Caller->getType();
  const Type *NewRetTy = FT->getReturnType();

  if (isa<StructType>(NewRetTy))
    return false;  // TODO: Handle multiple return values.

  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {
    if (Callee->isDeclaration() &&
        // Conversion is ok if changing from one pointer type to another or
        // from a pointer to an integer of the same size.
        !((isa<PointerType>(OldRetTy) || !TD ||
           OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
          (isa<PointerType>(NewRetTy) || !TD ||
           NewRetTy == TD->getIntPtrType(Caller->getContext()))))
      return false;   // Cannot transform this return value.

    if (!Caller->use_empty() &&
        // void -> non-void is handled specially
        !NewRetTy->isVoidTy() && !CastInst::isCastable(NewRetTy, OldRetTy))
      return false;   // Cannot transform this return value.

    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
      Attributes RAttrs = CallerPAL.getRetAttributes();
      if (RAttrs & Attribute::typeIncompatible(NewRetTy))
        return false;   // Attribute not compatible with transformed value.
    }

    // If the callsite is an invoke instruction, and the return value is used
    // by a PHI node in a successor, we cannot change the return type of the
    // call because there is no place to put the cast instruction (without
    // breaking the critical edge).  Bail out in this case.
    if (!Caller->use_empty())
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
        for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
             UI != E; ++UI)
          if (PHINode *PN = dyn_cast<PHINode>(*UI))
            if (PN->getParent() == II->getNormalDest() ||
                PN->getParent() == II->getUnwindDest())
              return false;
  }

  unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  CallSite::arg_iterator AI = CS.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    const Type *ActTy = (*AI)->getType();

    if (!CastInst::isCastable(ActTy, ParamTy))
      return false;   // Cannot transform this parameter value.

    if (CallerPAL.getParamAttributes(i + 1)
        & Attribute::typeIncompatible(ParamTy))
      return false;   // Attribute not compatible with transformed value.

    // Converting from one pointer type to another or between a pointer and an
    // integer of the same size is safe even if we do not have a body.
    bool isConvertible = ActTy == ParamTy ||
      (TD && ((isa<PointerType>(ParamTy) ||
               ParamTy == TD->getIntPtrType(Caller->getContext())) &&
              (isa<PointerType>(ActTy) ||
               ActTy == TD->getIntPtrType(Caller->getContext()))));
    if (Callee->isDeclaration() && !isConvertible) return false;
  }

  if (FT->getNumParams() < NumActualArgs && !FT->isVarArg() &&
      Callee->isDeclaration())
    return false;   // Do not delete arguments unless we have a function body.

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty())
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them.  Check that these extra arguments have
    // attributes that are compatible with being a vararg call argument.
    for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
      if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
        break;
      Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
      if (PAttrs & Attribute::VarArgsIncompatible)
        return false;
    }

  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary...
  std::vector<Value*> Args;
  Args.reserve(NumActualArgs);
  SmallVector<AttributeWithIndex, 8> attrVec;
  attrVec.reserve(NumCommonArgs);

  // Get any return attributes.
  Attributes RAttrs = CallerPAL.getRetAttributes();

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes.  Wipe out any problematic attributes.
  RAttrs &= ~Attribute::typeIncompatible(NewRetTy);

  // Add the new return attributes.
  if (RAttrs)
    attrVec.push_back(AttributeWithIndex::get(0, RAttrs));

  AI = CS.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    if ((*AI)->getType() == ParamTy) {
      Args.push_back(*AI);
    } else {
      Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
          false, ParamTy, false);
      Args.push_back(Builder->CreateCast(opcode, *AI, ParamTy, "tmp"));
    }

    // Add any parameter attributes.
    if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
      attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
  }

  // If the function takes more arguments than the call was taking, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));

  // If we are removing arguments to the function, emit an obnoxious warning.
  if (FT->getNumParams() < NumActualArgs) {
    if (!FT->isVarArg()) {
      errs() << "WARNING: While resolving call to function '"
             << Callee->getName() << "' arguments were dropped!\n";
    } else {
      // Add all of the arguments in their promoted form to the arg list.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        const Type *PTy = getPromotedType((*AI)->getType());
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through va_arg area!
          Instruction::CastOps opcode =
            CastInst::getCastOpcode(*AI, false, PTy, false);
          Args.push_back(Builder->CreateCast(opcode, *AI, PTy, "tmp"));
        } else {
          Args.push_back(*AI);
        }

        // Add any parameter attributes.
        if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
          attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
      }
    }
  }

  if (Attributes FnAttrs = CallerPAL.getFnAttributes())
    attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));

  if (NewRetTy->isVoidTy())
    Caller->setName("");   // Void type should not have a name.

  const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),
                                                     attrVec.end());

  Instruction *NC;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NC = InvokeInst::Create(Callee, II->getNormalDest(), II->getUnwindDest(),
                            Args.begin(), Args.end(),
                            Caller->getName(), Caller);
    cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
    cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
  } else {
    NC = CallInst::Create(Callee, Args.begin(), Args.end(),
                          Caller->getName(), Caller);
    CallInst *CI = cast<CallInst>(Caller);
    if (CI->isTailCall())
      cast<CallInst>(NC)->setTailCall();
    cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
    cast<CallInst>(NC)->setAttributes(NewCallerPAL);
  }

  // Insert a cast of the return type as necessary.
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      Instruction::CastOps opcode = CastInst::getCastOpcode(NC, false,
                                                            OldRetTy, false);
      NV = NC = CastInst::Create(opcode, NC, OldRetTy, "tmp");

      // If this is an invoke instruction, we should insert it after the first
      // non-phi instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstNonPHI();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call, just insert cast right after the call instr
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.AddUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }


  if (!Caller->use_empty())
    Caller->replaceAllUsesWith(NV);

  EraseInstFromFunction(*Caller);
  return true;
}

// transformCallThroughTrampoline - Turn a call to a function created by the
// init_trampoline intrinsic into a direct call to the underlying function.
//
Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
  Value *Callee = CS.getCalledValue();
  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  const AttrListPtr &Attrs = CS.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return 0;

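  // The trampoline records the target function (operand 2 of init_trampoline)
  // and the chain value (operand 3); calling through the trampoline is
  // equivalent to calling the target with the chain passed in its 'nest'
  // parameter, which is what the rewrite below materializes.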
  IntrinsicInst *Tramp =
    cast<IntrinsicInst>(cast<BitCastInst>(Callee)->getOperand(0));

  Function *NestF = cast<Function>(Tramp->getOperand(2)->stripPointerCasts());
  const PointerType *NestFPTy = cast<PointerType>(NestF->getType());
  const FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());

  const AttrListPtr &NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestIdx = 1;
    const Type *NestTy = 0;
    Attributes NestAttr = Attribute::None;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
         E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
      if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = NestAttrs.getParamAttributes(NestIdx);
        break;
      }

    if (NestTy) {
      Instruction *Caller = CS.getInstruction();
      std::vector<Value*> NewArgs;
      NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);

      SmallVector<AttributeWithIndex, 8> NewAttrs;
      NewAttrs.reserve(Attrs.getNumSlots() + 1);

      // Insert the nest argument into the call argument list, which may
      // mean appending it.  Likewise for attributes.

      // Add any result attributes.
      if (Attributes Attr = Attrs.getRetAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(0, Attr));

      {
        unsigned Idx = 1;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        do {
          if (Idx == NestIdx) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp->getOperand(3);
            if (NestVal->getType() != NestTy)
              NestVal = new BitCastInst(NestVal, NestTy, "nest", Caller);
            NewArgs.push_back(NestVal);
            NewAttrs.push_back(AttributeWithIndex::get(NestIdx, NestAttr));
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          if (Attributes Attr = Attrs.getParamAttributes(Idx))
            NewAttrs.push_back
              (AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr));

          ++Idx, ++I;
        } while (1);
      }

      // Add any function attributes.
      if (Attributes Attr = Attrs.getFnAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(~0, Attr));

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<const Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned Idx = 1;
        FunctionType::param_iterator I = FTy->param_begin(),
          E = FTy->param_end();

        do {
          if (Idx == NestIdx)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++Idx, ++I;
        } while (1);
      }

      // Replace the trampoline call with a direct call.  Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
        NestF->getType() == PointerType::getUnqual(NewFTy) ?
          NestF : ConstantExpr::getBitCast(NestF,
                                           PointerType::getUnqual(NewFTy));
      const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),
                                                   NewAttrs.end());

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        NewCaller = InvokeInst::Create(NewCallee,
                                       II->getNormalDest(), II->getUnwindDest(),
                                       NewArgs.begin(), NewArgs.end(),
                                       Caller->getName(), Caller);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewCallee, NewArgs.begin(), NewArgs.end(),
                                     Caller->getName(), Caller);
        if (cast<CallInst>(Caller)->isTailCall())
          cast<CallInst>(NewCaller)->setTailCall();
        cast<CallInst>(NewCaller)->
          setCallingConv(cast<CallInst>(Caller)->getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }
      if (!Caller->getType()->isVoidTy())
        Caller->replaceAllUsesWith(NewCaller);
      Caller->eraseFromParent();
      Worklist.Remove(Caller);
      return 0;
    }
  }

  // Replace the trampoline call with a direct call.  Since there is no 'nest'
  // parameter, there is no need to adjust the argument list.  Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee =
    NestF->getType() == PTy ? NestF :
                              ConstantExpr::getBitCast(NestF, PTy);
  CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}