ConstantFolding.cpp revision 36b56886974eae4f9c5ebc96befd3e7bfe5de338
//===-- ConstantFolding.cpp - Fold instructions into constants ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines routines for folding instructions into constants.
//
// Also, to supplement the basic IR ConstantExpr simplifications,
// this file defines some additional folding routines that can make use of
// DataLayout information. These functions cannot go in IR due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FEnv.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include <cerrno>
#include <cmath>
using namespace llvm;

//===----------------------------------------------------------------------===//
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//

/// FoldBitCast - Constant fold bitcast, symbolically evaluating it with
/// DataLayout. This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
static Constant *FoldBitCast(Constant *C, Type *DestTy,
                             const DataLayout &TD) {
  // Catch the obvious splat cases.
  if (C->isNullValue() && !DestTy->isX86_MMXTy())
    return Constant::getNullValue(DestTy);
  if (C->isAllOnesValue() && !DestTy->isX86_MMXTy())
    return Constant::getAllOnesValue(DestTy);

  // Handle a vector->integer cast.
  if (IntegerType *IT = dyn_cast<IntegerType>(DestTy)) {
    VectorType *VTy = dyn_cast<VectorType>(C->getType());
    if (VTy == 0)
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned NumSrcElts = VTy->getNumElements();
    Type *SrcEltTy = VTy->getElementType();

    // If the vector is a vector of floating-point values, convert it to a
    // vector of integers to simplify things.
    if (SrcEltTy->isFloatingPointTy()) {
      unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
      Type *SrcIVTy =
        VectorType::get(IntegerType::get(C->getContext(), FPWidth),
                        NumSrcElts);
      // Ask IR to do the conversion now that #elts line up.
      C = ConstantExpr::getBitCast(C, SrcIVTy);
    }

    ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(C);
    if (CDV == 0)
      return ConstantExpr::getBitCast(C, DestTy);

    // Now that we know that the input value is a vector of integers, just
    // shift and insert them into our result.
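    // Illustration (derived from the loop below, not an exhaustive spec):
    //   bitcast (<2 x i32> <i32 1, i32 2> to i64)
    // folds to 0x0000000200000001 on a little-endian target (element 0 lands
    // in the low bits) and to 0x0000000100000002 on a big-endian target.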
    unsigned BitShift = TD.getTypeAllocSizeInBits(SrcEltTy);
    APInt Result(IT->getBitWidth(), 0);
    for (unsigned i = 0; i != NumSrcElts; ++i) {
      Result <<= BitShift;
      if (TD.isLittleEndian())
        Result |= CDV->getElementAsInteger(NumSrcElts-i-1);
      else
        Result |= CDV->getElementAsInteger(i);
    }

    return ConstantInt::get(IT, Result);
  }

  // The code below only handles casts to vectors currently.
  VectorType *DestVTy = dyn_cast<VectorType>(DestTy);
  if (DestVTy == 0)
    return ConstantExpr::getBitCast(C, DestTy);

  // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
  // vector so the code below can handle it uniformly.
  if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
    Constant *Ops = C; // don't take the address of C!
    return FoldBitCast(ConstantVector::get(Ops), DestTy, TD);
  }

  // If this is a bitcast from constant vector -> vector, fold it.
  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
    return ConstantExpr::getBitCast(C, DestTy);

  // If the element types match, IR can fold it.
  unsigned NumDstElt = DestVTy->getNumElements();
  unsigned NumSrcElt = C->getType()->getVectorNumElements();
  if (NumDstElt == NumSrcElt)
    return ConstantExpr::getBitCast(C, DestTy);

  Type *SrcEltTy = C->getType()->getVectorElementType();
  Type *DstEltTy = DestVTy->getElementType();

  // Otherwise, we're changing the number of elements in a vector, which
  // requires endianness information to do the right thing.  For example,
  //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  // folds to (little endian):
  //    <4 x i32> <i32 0, i32 0, i32 1, i32 0>
  // and to (big endian):
  //    <4 x i32> <i32 0, i32 0, i32 0, i32 1>

  // First things first: we only want to think about integers here, so if
  // we have something in FP form, recast it as integer.
  if (DstEltTy->isFloatingPointTy()) {
    // Fold to a vector of integers with the same size as our FP type.
    unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
    Type *DestIVTy =
      VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
    // Recursively handle this integer conversion, if possible.
    C = FoldBitCast(C, DestIVTy, TD);

    // Finally, IR can handle this now that #elts line up.
    return ConstantExpr::getBitCast(C, DestTy);
  }

  // Okay, we know the destination is integer; if the input is FP, convert
  // it to integer first.
  if (SrcEltTy->isFloatingPointTy()) {
    unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
    Type *SrcIVTy =
      VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
    // Ask IR to do the conversion now that #elts line up.
    C = ConstantExpr::getBitCast(C, SrcIVTy);
    // If IR wasn't able to fold it, bail out.
    if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
        !isa<ConstantDataVector>(C))
      return C;
  }

  // Now we know that the input and output vectors are both integer vectors
  // of the same size, and that their #elements is not the same.  Do the
  // conversion here, which depends on whether the input or output has
  // more elements.
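  // Illustration of the narrowing direction (derived from the loops below):
  //   bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
  // yields <2 x i64> <i64 0x100000000, i64 0x300000002> on a little-endian
  // target, and <2 x i64> <i64 0x1, i64 0x200000003> on a big-endian target.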
  bool isLittleEndian = TD.isLittleEndian();

  SmallVector<Constant*, 32> Result;
  if (NumDstElt < NumSrcElt) {
    // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
    Constant *Zero = Constant::getNullValue(DstEltTy);
    unsigned Ratio = NumSrcElt/NumDstElt;
    unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
    unsigned SrcElt = 0;
    for (unsigned i = 0; i != NumDstElt; ++i) {
      // Build each element of the result.
      Constant *Elt = Zero;
      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src =
          dyn_cast<ConstantInt>(C->getAggregateElement(SrcElt++));
        if (!Src)  // Reject constantexpr elements.
          return ConstantExpr::getBitCast(C, DestTy);

        // Zero extend the element to the right size.
        Src = ConstantExpr::getZExt(Src, Elt->getType());

        // Shift it to the right place, depending on endianness.
        Src = ConstantExpr::getShl(Src,
                                   ConstantInt::get(Src->getType(), ShiftAmt));
        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;

        // Mix it in.
        Elt = ConstantExpr::getOr(Elt, Src);
      }
      Result.push_back(Elt);
    }
    return ConstantVector::get(Result);
  }

  // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  unsigned Ratio = NumDstElt/NumSrcElt;
  unsigned DstBitSize = DstEltTy->getPrimitiveSizeInBits();

  // Loop over each source value, expanding into multiple results.
  for (unsigned i = 0; i != NumSrcElt; ++i) {
    Constant *Src = dyn_cast<ConstantInt>(C->getAggregateElement(i));
    if (!Src)  // Reject constantexpr elements.
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
    for (unsigned j = 0; j != Ratio; ++j) {
      // Shift the piece of the value into the right place, depending on
      // endianness.
      Constant *Elt = ConstantExpr::getLShr(Src,
                                  ConstantInt::get(Src->getType(), ShiftAmt));
      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;

      // Truncate and remember this piece.
      Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
    }
  }

  return ConstantVector::get(Result);
}


/// IsConstantOffsetFromGlobal - If this constant is actually a constant offset
/// from a global, return the global and the constant. Because of
/// constantexprs, this function is recursive.
static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
                                       APInt &Offset, const DataLayout &TD) {
  // Trivial case, constant is the global.
  if ((GV = dyn_cast<GlobalValue>(C))) {
    unsigned BitWidth = TD.getPointerTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  // Otherwise, if this isn't a constant expr, bail out.
  ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
  if (!CE) return false;

  // Look through ptr->int and ptr->ptr casts.
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::BitCast)
    return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD);

  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
  GEPOperator *GEP = dyn_cast<GEPOperator>(CE);
  if (!GEP)
    return false;

  unsigned BitWidth = TD.getPointerTypeSizeInBits(GEP->getType());
  APInt TmpOffset(BitWidth, 0);

  // If the base isn't a global+constant, we aren't either.
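  // (For example, a GEP whose base is inttoptr(i64 1234) has no global base,
  // so the whole expression is rejected here.)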
  if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, TD))
    return false;

  // Otherwise, add any offset that our operands provide.
  if (!GEP->accumulateConstantOffset(TD, TmpOffset))
    return false;

  Offset = TmpOffset;
  return true;
}

/// ReadDataFromGlobal - Recursive helper to read bits out of global. C is the
/// constant being copied out of. ByteOffset is an offset into C. CurPtr is the
/// pointer to copy results into and BytesLeft is the number of bytes left in
/// the CurPtr buffer. TD is the target data.
static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
                               unsigned char *CurPtr, unsigned BytesLeft,
                               const DataLayout &TD) {
  assert(ByteOffset <= TD.getTypeAllocSize(C->getType()) &&
         "Out of range access");

  // If this element is zero or undefined, we can just return since *CurPtr is
  // zero initialized.
  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
    return true;

  if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
    if (CI->getBitWidth() > 64 ||
        (CI->getBitWidth() & 7) != 0)
      return false;

    uint64_t Val = CI->getZExtValue();
    unsigned IntBytes = unsigned(CI->getBitWidth()/8);

    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
      int n = ByteOffset;
      if (!TD.isLittleEndian())
        n = IntBytes - n - 1;
      CurPtr[i] = (unsigned char)(Val >> (n * 8));
      ++ByteOffset;
    }
    return true;
  }

  if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->getType()->isDoubleTy()) {
      C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), TD);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, TD);
    }
    if (CFP->getType()->isFloatTy()) {
      C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), TD);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, TD);
    }
    if (CFP->getType()->isHalfTy()) {
      C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), TD);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, TD);
    }
    return false;
  }

  if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C)) {
    const StructLayout *SL = TD.getStructLayout(CS->getType());
    unsigned Index = SL->getElementContainingOffset(ByteOffset);
    uint64_t CurEltOffset = SL->getElementOffset(Index);
    ByteOffset -= CurEltOffset;

    while (1) {
      // If the element access is to the element itself and not to tail
      // padding, read the bytes from the element.
      uint64_t EltSize = TD.getTypeAllocSize(CS->getOperand(Index)->getType());

      if (ByteOffset < EltSize &&
          !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
                              BytesLeft, TD))
        return false;

      ++Index;

      // Check to see if we read from the last struct element; if so we're
      // done.
      if (Index == CS->getType()->getNumElements())
        return true;

      // If we read all of the bytes we needed from this element we're done.
      uint64_t NextEltOffset = SL->getElementOffset(Index);

      if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
        return true;

      // Move to the next element of the struct.
      CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
      BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
      ByteOffset = 0;
      CurEltOffset = NextEltOffset;
    }
    // not reached.
  }

  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
      isa<ConstantDataSequential>(C)) {
    Type *EltTy = C->getType()->getSequentialElementType();
    uint64_t EltSize = TD.getTypeAllocSize(EltTy);
    uint64_t Index = ByteOffset / EltSize;
    uint64_t Offset = ByteOffset - Index * EltSize;
    uint64_t NumElts;
    if (ArrayType *AT = dyn_cast<ArrayType>(C->getType()))
      NumElts = AT->getNumElements();
    else
      NumElts = C->getType()->getVectorNumElements();

    for (; Index != NumElts; ++Index) {
      if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
                              BytesLeft, TD))
        return false;

      uint64_t BytesWritten = EltSize - Offset;
      assert(BytesWritten <= EltSize && "Not indexing into this element?");
      if (BytesWritten >= BytesLeft)
        return true;

      Offset = 0;
      BytesLeft -= BytesWritten;
      CurPtr += BytesWritten;
    }
    return true;
  }

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->getOpcode() == Instruction::IntToPtr &&
        CE->getOperand(0)->getType() == TD.getIntPtrType(CE->getType())) {
      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
                                BytesLeft, TD);
    }
  }

  // Otherwise, unknown initializer type.
  return false;
}

static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
                                                 const DataLayout &TD) {
  PointerType *PTy = cast<PointerType>(C->getType());
  Type *LoadTy = PTy->getElementType();
  IntegerType *IntType = dyn_cast<IntegerType>(LoadTy);

  // If this isn't an integer load we can't fold it directly.
  if (!IntType) {
    unsigned AS = PTy->getAddressSpace();

    // If this is a float/double load, we can try folding it as an int32/64
    // load and then bitcast the result. This can be useful for union cases.
    // Note that address spaces don't matter here since we're not going to
    // result in an actual new load.
    Type *MapTy;
    if (LoadTy->isHalfTy())
      MapTy = Type::getInt16PtrTy(C->getContext(), AS);
    else if (LoadTy->isFloatTy())
      MapTy = Type::getInt32PtrTy(C->getContext(), AS);
    else if (LoadTy->isDoubleTy())
      MapTy = Type::getInt64PtrTy(C->getContext(), AS);
    else if (LoadTy->isVectorTy()) {
      MapTy = PointerType::getIntNPtrTy(C->getContext(),
                                        TD.getTypeAllocSizeInBits(LoadTy),
                                        AS);
    } else
      return 0;

    C = FoldBitCast(C, MapTy, TD);
    if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, TD))
      return FoldBitCast(Res, LoadTy, TD);
    return 0;
  }

  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
  if (BytesLoaded > 32 || BytesLoaded == 0)
    return 0;

  GlobalValue *GVal;
  APInt Offset;
  if (!IsConstantOffsetFromGlobal(C, GVal, Offset, TD))
    return 0;

  GlobalVariable *GV = dyn_cast<GlobalVariable>(GVal);
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      !GV->getInitializer()->getType()->isSized())
    return 0;

  // If we're loading from before the beginning of the global, some bytes may
  // be valid, but we don't try to handle this.
  if (Offset.isNegative())
    return 0;

  // If we're not accessing anything in this constant, the result is undefined.
  if (Offset.getZExtValue() >=
      TD.getTypeAllocSize(GV->getInitializer()->getType()))
    return UndefValue::get(IntType);

  unsigned char RawBytes[32] = {0};
  if (!ReadDataFromGlobal(GV->getInitializer(), Offset.getZExtValue(),
                          RawBytes, BytesLoaded, TD))
    return 0;

  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
  if (TD.isLittleEndian()) {
    ResultVal = RawBytes[BytesLoaded - 1];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[BytesLoaded - 1 - i];
    }
  } else {
    ResultVal = RawBytes[0];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[i];
    }
  }

  return ConstantInt::get(IntType->getContext(), ResultVal);
}

/// ConstantFoldLoadFromConstPtr - Return the value that a load from C would
/// produce if it is constant and determinable. If this is not determinable,
/// return null.
Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
                                             const DataLayout *TD) {
  // First, try the easy cases:
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      return GV->getInitializer();

  // If the loaded value isn't a constant expr, we can't handle it.
  ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
  if (!CE)
    return 0;

  if (CE->getOpcode() == Instruction::GetElementPtr) {
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
      if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
        if (Constant *V =
              ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE))
          return V;
      }
    }
  }

  // Instead of loading the constant C string, use the corresponding integer
  // value directly if the string length is small enough.
  StringRef Str;
  if (TD && getConstantStringInfo(CE, Str) && !Str.empty()) {
    unsigned StrLen = Str.size();
    Type *Ty = cast<PointerType>(CE->getType())->getElementType();
    unsigned NumBits = Ty->getPrimitiveSizeInBits();
    // Replace the load with an immediate integer if the result is an integer
    // or fp value.
    if ((NumBits >> 3) == StrLen + 1 && (NumBits & 7) == 0 &&
        (isa<IntegerType>(Ty) || Ty->isFloatingPointTy())) {
      APInt StrVal(NumBits, 0);
      APInt SingleChar(NumBits, 0);
      if (TD->isLittleEndian()) {
        for (signed i = StrLen-1; i >= 0; i--) {
          SingleChar = (uint64_t) Str[i] & UCHAR_MAX;
          StrVal = (StrVal << 8) | SingleChar;
        }
      } else {
        for (unsigned i = 0; i < StrLen; i++) {
          SingleChar = (uint64_t) Str[i] & UCHAR_MAX;
          StrVal = (StrVal << 8) | SingleChar;
        }
        // Append the NUL terminator at the end.
        SingleChar = 0;
        StrVal = (StrVal << 8) | SingleChar;
      }

      Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
      if (Ty->isFloatingPointTy())
        Res = ConstantExpr::getBitCast(Res, Ty);
      return Res;
    }
  }

  // If this load comes from anywhere in a constant global, and if the global
  // is all undef or zero, we know what it loads.
  if (GlobalVariable *GV =
        dyn_cast<GlobalVariable>(GetUnderlyingObject(CE, TD))) {
    if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
      Type *ResTy = cast<PointerType>(C->getType())->getElementType();
      if (GV->getInitializer()->isNullValue())
        return Constant::getNullValue(ResTy);
      if (isa<UndefValue>(GV->getInitializer()))
        return UndefValue::get(ResTy);
    }
  }

  // Try hard to fold loads from bitcasted strange and non-type-safe things.
  if (TD)
    return FoldReinterpretLoadFromConstPtr(CE, *TD);
  return 0;
}

static Constant *ConstantFoldLoadInst(const LoadInst *LI,
                                      const DataLayout *TD) {
  if (LI->isVolatile()) return 0;

  if (Constant *C = dyn_cast<Constant>(LI->getOperand(0)))
    return ConstantFoldLoadFromConstPtr(C, TD);

  return 0;
}

/// SymbolicallyEvaluateBinop - One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
/// these together. If target data info is available, it is provided as DL,
/// otherwise DL is null.
static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
                                           Constant *Op1,
                                           const DataLayout *DL) {
  // SROA

  // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
  // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
  // bits.


  if (Opc == Instruction::And && DL) {
    unsigned BitWidth = DL->getTypeSizeInBits(Op0->getType()->getScalarType());
    APInt KnownZero0(BitWidth, 0), KnownOne0(BitWidth, 0);
    APInt KnownZero1(BitWidth, 0), KnownOne1(BitWidth, 0);
    ComputeMaskedBits(Op0, KnownZero0, KnownOne0, DL);
    ComputeMaskedBits(Op1, KnownZero1, KnownOne1, DL);
    if ((KnownOne1 | KnownZero0).isAllOnesValue()) {
      // All the bits of Op0 that the 'and' could be masking are already zero.
      return Op0;
    }
    if ((KnownOne0 | KnownZero1).isAllOnesValue()) {
      // All the bits of Op1 that the 'and' could be masking are already zero.
      return Op1;
    }

    APInt KnownZero = KnownZero0 | KnownZero1;
    APInt KnownOne = KnownOne0 & KnownOne1;
    if ((KnownZero | KnownOne).isAllOnesValue()) {
      return ConstantInt::get(Op0->getType(), KnownOne);
    }
  }

  // If the constant expr is something like &A[123] - &A[4].f, fold this into a
  // constant. This happens frequently when iterating over a global array.
  if (Opc == Instruction::Sub && DL) {
    GlobalValue *GV1, *GV2;
    APInt Offs1, Offs2;

    if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, *DL))
      if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, *DL) &&
          GV1 == GV2) {
        unsigned OpSize = DL->getTypeSizeInBits(Op0->getType());

        // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
        // PtrToInt may change the bitwidth, so we have to convert to the
        // right size first.
        return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
                                                Offs2.zextOrTrunc(OpSize));
      }
  }

  return 0;
}

/// CastGEPIndices - If array indices are not pointer-sized integers,
/// explicitly cast them so that they aren't implicitly cast by the
/// getelementptr.
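/// For example, on a target with 64-bit pointers,
///   getelementptr [10 x i32]* @G, i16 0, i16 5
/// gets its indices rewritten as i64 so the offset computation is explicit.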
static Constant *CastGEPIndices(ArrayRef<Constant *> Ops,
                                Type *ResultTy, const DataLayout *TD,
                                const TargetLibraryInfo *TLI) {
  if (!TD)
    return 0;

  Type *IntPtrTy = TD->getIntPtrType(ResultTy);

  bool Any = false;
  SmallVector<Constant*, 32> NewIdxs;
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    if ((i == 1 ||
         !isa<StructType>(GetElementPtrInst::getIndexedType(
                            Ops[0]->getType(),
                            Ops.slice(1, i - 1)))) &&
        Ops[i]->getType() != IntPtrTy) {
      Any = true;
      NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
                                                                      true,
                                                                      IntPtrTy,
                                                                      true),
                                              Ops[i], IntPtrTy));
    } else
      NewIdxs.push_back(Ops[i]);
  }

  if (!Any)
    return 0;

  Constant *C = ConstantExpr::getGetElementPtr(Ops[0], NewIdxs);
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
    if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
      C = Folded;
  }

  return C;
}

/// Strip the pointer casts, but preserve the address space information.
static Constant *StripPtrCastKeepAS(Constant *Ptr) {
  assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
  PointerType *OldPtrTy = cast<PointerType>(Ptr->getType());
  Ptr = cast<Constant>(Ptr->stripPointerCasts());
  PointerType *NewPtrTy = cast<PointerType>(Ptr->getType());

  // Preserve the address space number of the pointer.
  if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) {
    NewPtrTy = NewPtrTy->getElementType()->getPointerTo(
      OldPtrTy->getAddressSpace());
    Ptr = ConstantExpr::getPointerCast(Ptr, NewPtrTy);
  }
  return Ptr;
}

/// SymbolicallyEvaluateGEP - If we can symbolically evaluate the specified GEP
/// constant expression, do so.
static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
                                         Type *ResultTy, const DataLayout *TD,
                                         const TargetLibraryInfo *TLI) {
  Constant *Ptr = Ops[0];
  if (!TD || !Ptr->getType()->getPointerElementType()->isSized() ||
      !Ptr->getType()->isPointerTy())
    return 0;

  Type *IntPtrTy = TD->getIntPtrType(Ptr->getType());
  Type *ResultElementTy = ResultTy->getPointerElementType();

  // If this is a constant expr gep that is effectively computing an
  // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'.
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    if (!isa<ConstantInt>(Ops[i])) {

      // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
      // "inttoptr (sub (ptrtoint Ptr), V)"
      if (Ops.size() == 2 && ResultElementTy->isIntegerTy(8)) {
        ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[1]);
        assert((CE == 0 || CE->getType() == IntPtrTy) &&
               "CastGEPIndices didn't canonicalize index types!");
        if (CE && CE->getOpcode() == Instruction::Sub &&
            CE->getOperand(0)->isNullValue()) {
          Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
          Res = ConstantExpr::getSub(Res, CE->getOperand(1));
          Res = ConstantExpr::getIntToPtr(Res, ResultTy);
          if (ConstantExpr *ResCE = dyn_cast<ConstantExpr>(Res))
            Res = ConstantFoldConstantExpression(ResCE, TD, TLI);
          return Res;
        }
      }
      return 0;
    }

  unsigned BitWidth = TD->getTypeSizeInBits(IntPtrTy);
  APInt Offset =
    APInt(BitWidth, TD->getIndexedOffset(Ptr->getType(),
                                         makeArrayRef((Value *const*)
                                                        Ops.data() + 1,
                                                      Ops.size() - 1)));
  Ptr = StripPtrCastKeepAS(Ptr);

  // If this is a GEP of a GEP, fold it all into a single GEP.
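  // (For example, gep(gep(i32* @G, i64 1), i64 2) accumulates a byte offset
  // of 4 + 8 = 12 from @G.)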
  while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
    SmallVector<Value *, 4> NestedOps(GEP->op_begin() + 1, GEP->op_end());

    // Do not try to incorporate the sub-GEP if some index is not a number.
    bool AllConstantInt = true;
    for (unsigned i = 0, e = NestedOps.size(); i != e; ++i)
      if (!isa<ConstantInt>(NestedOps[i])) {
        AllConstantInt = false;
        break;
      }
    if (!AllConstantInt)
      break;

    Ptr = cast<Constant>(GEP->getOperand(0));
    Offset += APInt(BitWidth,
                    TD->getIndexedOffset(Ptr->getType(), NestedOps));
    Ptr = StripPtrCastKeepAS(Ptr);
  }

  // If the base value for this address is a literal integer value, fold the
  // getelementptr to the resulting integer value cast to the pointer type.
  APInt BasePtr(BitWidth, 0);
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) {
    if (CE->getOpcode() == Instruction::IntToPtr) {
      if (ConstantInt *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
        BasePtr = Base->getValue().zextOrTrunc(BitWidth);
    }
  }

  if (Ptr->isNullValue() || BasePtr != 0) {
    Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr);
    return ConstantExpr::getIntToPtr(C, ResultTy);
  }

  // Otherwise form a regular getelementptr. Recompute the indices so that
  // we eliminate over-indexing of the notional static type array bounds.
  // This makes it easy to determine if the getelementptr is "inbounds".
  // Also, this helps GlobalOpt do SROA on GlobalVariables.
  Type *Ty = Ptr->getType();
  assert(Ty->isPointerTy() && "Forming regular GEP of non-pointer type");
  SmallVector<Constant *, 32> NewIdxs;

  do {
    if (SequentialType *ATy = dyn_cast<SequentialType>(Ty)) {
      if (ATy->isPointerTy()) {
        // The only pointer indexing we'll do is on the first index of the GEP.
        if (!NewIdxs.empty())
          break;

        // Only handle pointers to sized types, not pointers to functions.
        if (!ATy->getElementType()->isSized())
          return 0;
      }

      // Determine which element of the array the offset points into.
      APInt ElemSize(BitWidth, TD->getTypeAllocSize(ATy->getElementType()));
      if (ElemSize == 0)
        // The element size is 0. This may be [0 x Ty]*, so just use a zero
        // index for this level and proceed to the next level to see if it can
        // accommodate the offset.
        NewIdxs.push_back(ConstantInt::get(IntPtrTy, 0));
      else {
        // The element size is non-zero: divide the offset by the element
        // size (rounding down) to compute the index at this level.
        APInt NewIdx = Offset.udiv(ElemSize);
        Offset -= NewIdx * ElemSize;
        NewIdxs.push_back(ConstantInt::get(IntPtrTy, NewIdx));
      }
      Ty = ATy->getElementType();
    } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
      // If we end up with an offset that isn't valid for this struct type, we
      // can't re-form this GEP in a regular form, so bail out. The pointer
      // operand likely went through casts that are necessary to make the GEP
      // sensible.
      const StructLayout &SL = *TD->getStructLayout(STy);
      if (Offset.uge(SL.getSizeInBytes()))
        break;

      // Determine which field of the struct the offset points into. The
      // getZExtValue is fine as we've already ensured that the offset is
      // within the range representable by the StructLayout API.
      unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue());
      NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
                                         ElIdx));
      Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx));
      Ty = STy->getTypeAtIndex(ElIdx);
    } else {
      // We've reached some non-indexable type.
      break;
    }
  } while (Ty != ResultElementTy);

  // If we haven't used up the entire offset by descending the static
  // type, then the offset is pointing into the middle of an indivisible
  // member, so we can't simplify it.
  if (Offset != 0)
    return 0;

  // Create a GEP.
  Constant *C = ConstantExpr::getGetElementPtr(Ptr, NewIdxs);
  assert(C->getType()->getPointerElementType() == Ty &&
         "Computed GetElementPtr has unexpected type!");

  // If we ended up indexing a member with a type that doesn't match
  // the type of what the original indices indexed, add a cast.
  if (Ty != ResultElementTy)
    C = FoldBitCast(C, ResultTy, *TD);

  return C;
}



//===----------------------------------------------------------------------===//
// Constant Folding public APIs
//===----------------------------------------------------------------------===//

/// ConstantFoldInstruction - Try to constant fold the specified instruction.
/// If successful, the constant result is returned, if not, null is returned.
/// Note that this fails if not all of the operands are constant. Otherwise,
/// this function can only fail when attempting to fold instructions like loads
/// and stores, which have no constant expression form.
Constant *llvm::ConstantFoldInstruction(Instruction *I,
                                        const DataLayout *TD,
                                        const TargetLibraryInfo *TLI) {
  // Handle PHI nodes quickly here...
  if (PHINode *PN = dyn_cast<PHINode>(I)) {
    Constant *CommonValue = 0;

    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *Incoming = PN->getIncomingValue(i);
      // If the incoming value is undef then skip it. Note that while we could
      // skip the value if it is equal to the phi node itself we choose not to
      // because that would break the rule that constant folding only applies
      // if all operands are constants.
      if (isa<UndefValue>(Incoming))
        continue;
      // If the incoming value is not a constant, then give up.
      Constant *C = dyn_cast<Constant>(Incoming);
      if (!C)
        return 0;
      // Fold the PHI's operands.
      if (ConstantExpr *NewC = dyn_cast<ConstantExpr>(C))
        C = ConstantFoldConstantExpression(NewC, TD, TLI);
      // If the incoming value is a different constant from the one we saw
      // previously, then give up.
      if (CommonValue && C != CommonValue)
        return 0;
      CommonValue = C;
    }

    // If we reach here, all incoming values are the same constant or undef.
    return CommonValue ? CommonValue : UndefValue::get(PN->getType());
  }

  // Scan the operand list, checking to see if they are all constants; if so,
  // hand off to ConstantFoldInstOperands.
  SmallVector<Constant*, 8> Ops;
  for (User::op_iterator i = I->op_begin(), e = I->op_end(); i != e; ++i) {
    Constant *Op = dyn_cast<Constant>(*i);
    if (!Op)
      return 0;  // All operands not constant!

    // Fold the Instruction's operands.
    if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(Op))
      Op = ConstantFoldConstantExpression(NewCE, TD, TLI);

    Ops.push_back(Op);
  }

  if (const CmpInst *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
                                           TD, TLI);

  if (const LoadInst *LI = dyn_cast<LoadInst>(I))
    return ConstantFoldLoadInst(LI, TD);

  if (InsertValueInst *IVI = dyn_cast<InsertValueInst>(I)) {
    return ConstantExpr::getInsertValue(
                                cast<Constant>(IVI->getAggregateOperand()),
                                cast<Constant>(IVI->getInsertedValueOperand()),
                                IVI->getIndices());
  }

  if (ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I)) {
    return ConstantExpr::getExtractValue(
                                    cast<Constant>(EVI->getAggregateOperand()),
                                    EVI->getIndices());
  }

  return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Ops, TD, TLI);
}

static Constant *
ConstantFoldConstantExpressionImpl(const ConstantExpr *CE,
                                   const DataLayout *TD,
                                   const TargetLibraryInfo *TLI,
                                   SmallPtrSet<ConstantExpr *, 4> &FoldedOps) {
  SmallVector<Constant *, 8> Ops;
  for (User::const_op_iterator i = CE->op_begin(), e = CE->op_end(); i != e;
       ++i) {
    Constant *NewC = cast<Constant>(*i);
    // Recursively fold the ConstantExpr's operands. If we have already folded
    // a ConstantExpr, we don't have to process it again.
    if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(NewC)) {
      if (FoldedOps.insert(NewCE))
        NewC = ConstantFoldConstantExpressionImpl(NewCE, TD, TLI, FoldedOps);
    }
    Ops.push_back(NewC);
  }

  if (CE->isCompare())
    return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1],
                                           TD, TLI);
  return ConstantFoldInstOperands(CE->getOpcode(), CE->getType(), Ops, TD,
                                  TLI);
}

/// ConstantFoldConstantExpression - Attempt to fold the constant expression
/// using the specified DataLayout. If successful, the constant result is
/// returned, if not, null is returned.
Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE,
                                               const DataLayout *TD,
                                               const TargetLibraryInfo *TLI) {
  SmallPtrSet<ConstantExpr *, 4> FoldedOps;
  return ConstantFoldConstantExpressionImpl(CE, TD, TLI, FoldedOps);
}

/// ConstantFoldInstOperands - Attempt to constant fold an instruction with the
/// specified opcode and operands. If successful, the constant result is
/// returned, if not, null is returned. Note that this function can fail when
/// attempting to fold instructions like loads and stores, which have no
/// constant expression form.
///
/// TODO: This function neither utilizes nor preserves nsw/nuw/inbounds/etc
/// information, due to only being passed an opcode and operands. Constant
/// folding using this function strips this information.
///
Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
                                         ArrayRef<Constant *> Ops,
                                         const DataLayout *TD,
                                         const TargetLibraryInfo *TLI) {
  // Handle easy binops first.
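  // A binary operator whose operands are both plain constants folds
  // completely via ConstantExpr::get below; when one side is a ConstantExpr,
  // SymbolicallyEvaluateBinop may still simplify it (e.g. through the
  // known-bits and matching-global-offset logic above).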
  if (Instruction::isBinaryOp(Opcode)) {
    if (isa<ConstantExpr>(Ops[0]) || isa<ConstantExpr>(Ops[1])) {
      if (Constant *C = SymbolicallyEvaluateBinop(Opcode, Ops[0], Ops[1], TD))
        return C;
    }

    return ConstantExpr::get(Opcode, Ops[0], Ops[1]);
  }

  switch (Opcode) {
  default: return 0;
  case Instruction::ICmp:
  case Instruction::FCmp: llvm_unreachable("Invalid for compares");
  case Instruction::Call:
    if (Function *F = dyn_cast<Function>(Ops.back()))
      if (canConstantFoldCallTo(F))
        return ConstantFoldCall(F, Ops.slice(0, Ops.size() - 1), TLI);
    return 0;
  case Instruction::PtrToInt:
    // If the input is an inttoptr, eliminate the pair. This requires knowing
    // the width of a pointer, so it can't be done in ConstantExpr::getCast.
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0])) {
      if (TD && CE->getOpcode() == Instruction::IntToPtr) {
        Constant *Input = CE->getOperand(0);
        unsigned InWidth = Input->getType()->getScalarSizeInBits();
        unsigned PtrWidth = TD->getPointerTypeSizeInBits(CE->getType());
        if (PtrWidth < InWidth) {
          Constant *Mask =
            ConstantInt::get(CE->getContext(),
                             APInt::getLowBitsSet(InWidth, PtrWidth));
          Input = ConstantExpr::getAnd(Input, Mask);
        }
        // Do a zext or trunc to get to the dest size.
        return ConstantExpr::getIntegerCast(Input, DestTy, false);
      }
    }
    return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
  case Instruction::IntToPtr:
    // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
    // the int size is >= the ptr size and the address spaces are the same.
    // This requires knowing the width of a pointer, so it can't be done in
    // ConstantExpr::getCast.
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0])) {
      if (TD && CE->getOpcode() == Instruction::PtrToInt) {
        Constant *SrcPtr = CE->getOperand(0);
        unsigned SrcPtrSize = TD->getPointerTypeSizeInBits(SrcPtr->getType());
        unsigned MidIntSize = CE->getType()->getScalarSizeInBits();

        if (MidIntSize >= SrcPtrSize) {
          unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
          if (SrcAS == DestTy->getPointerAddressSpace())
            return FoldBitCast(CE->getOperand(0), DestTy, *TD);
        }
      }
    }

    return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
  case Instruction::BitCast:
    if (TD)
      return FoldBitCast(Ops[0], DestTy, *TD);
    return ConstantExpr::getBitCast(Ops[0], DestTy);
  case Instruction::Select:
    return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]);
  case Instruction::ExtractElement:
    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
  case Instruction::InsertElement:
    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]);
  case Instruction::GetElementPtr:
    if (Constant *C = CastGEPIndices(Ops, DestTy, TD, TLI))
      return C;
    if (Constant *C = SymbolicallyEvaluateGEP(Ops, DestTy, TD, TLI))
      return C;

    return ConstantExpr::getGetElementPtr(Ops[0],
                                          Ops.slice(1));
  }
}

/// ConstantFoldCompareInstOperands - Attempt to constant fold a compare
/// instruction (icmp/fcmp) with the specified operands. If it fails, it
/// returns a constant expression of the specified operands.
///
Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
                                                Constant *Ops0, Constant *Ops1,
                                                const DataLayout *TD,
                                                const TargetLibraryInfo *TLI) {
  // fold: icmp (inttoptr x), null         -> icmp x, 0
  // fold: icmp (ptrtoint x), 0            -> icmp x, null
  // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
  // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
  //
  // ConstantExpr::getCompare cannot do this, because it doesn't have TD
  // around to know if bit truncation is happening.
  if (ConstantExpr *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
    if (TD && Ops1->isNullValue()) {
      if (CE0->getOpcode() == Instruction::IntToPtr) {
        Type *IntPtrTy = TD->getIntPtrType(CE0->getType());
        // Convert the integer value to the right size to ensure we get the
        // proper extension or truncation.
        Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                   IntPtrTy, false);
        Constant *Null = Constant::getNullValue(C->getType());
        return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
      }

      // Only do this transformation if the int is intptrty in size, otherwise
      // there is a truncation or extension that we aren't modeling.
      if (CE0->getOpcode() == Instruction::PtrToInt) {
        Type *IntPtrTy = TD->getIntPtrType(CE0->getOperand(0)->getType());
        if (CE0->getType() == IntPtrTy) {
          Constant *C = CE0->getOperand(0);
          Constant *Null = Constant::getNullValue(C->getType());
          return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
        }
      }
    }

    if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
      if (TD && CE0->getOpcode() == CE1->getOpcode()) {
        if (CE0->getOpcode() == Instruction::IntToPtr) {
          Type *IntPtrTy = TD->getIntPtrType(CE0->getType());

          // Convert the integer value to the right size to ensure we get the
          // proper extension or truncation.
          Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                      IntPtrTy, false);
          Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
                                                      IntPtrTy, false);
          return ConstantFoldCompareInstOperands(Predicate, C0, C1, TD, TLI);
        }

        // Only do this transformation if the int is intptrty in size,
        // otherwise there is a truncation or extension that we aren't
        // modeling.
        if (CE0->getOpcode() == Instruction::PtrToInt) {
          Type *IntPtrTy = TD->getIntPtrType(CE0->getOperand(0)->getType());
          if (CE0->getType() == IntPtrTy &&
              CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
            return ConstantFoldCompareInstOperands(Predicate,
                                                   CE0->getOperand(0),
                                                   CE1->getOperand(0),
                                                   TD,
                                                   TLI);
          }
        }
      }
    }

    // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
    // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
    if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
        CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
      Constant *LHS =
        ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0), Ops1,
                                        TD, TLI);
      Constant *RHS =
        ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(1), Ops1,
                                        TD, TLI);
      unsigned OpC =
        Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
      Constant *Ops[] = { LHS, RHS };
      return ConstantFoldInstOperands(OpC, LHS->getType(), Ops, TD, TLI);
    }
  }

  return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
}


/// ConstantFoldLoadThroughGEPConstantExpr - Given a constant and a
/// getelementptr constantexpr, return the constant value being addressed by
/// the constant expression, or null if something is funny and we can't decide.
Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
                                                       ConstantExpr *CE) {
  if (!CE->getOperand(1)->isNullValue())
    return 0;  // Do not allow stepping over the value!

  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) {
    C = C->getAggregateElement(CE->getOperand(i));
    if (C == 0)
      return 0;
  }
  return C;
}

/// ConstantFoldLoadThroughGEPIndices - Given a constant and getelementptr
/// indices (with an *implied* zero pointer index that is not in the list),
/// return the constant value being addressed by a virtual load, or null if
/// something is funny and we can't decide.
Constant *llvm::ConstantFoldLoadThroughGEPIndices(Constant *C,
                                                  ArrayRef<Constant*> Indices) {
  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
    C = C->getAggregateElement(Indices[i]);
    if (C == 0)
      return 0;
  }
  return C;
}


//===----------------------------------------------------------------------===//
// Constant Folding for Calls
//

/// canConstantFoldCallTo - Return true if it's even possible to fold a call to
/// the specified function.
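/// For example, a call to floor or llvm.sqrt.f64 with a constant argument can
/// be folded, while a call to an arbitrary external function cannot.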
bool llvm::canConstantFoldCallTo(const Function *F) {
  switch (F->getIntrinsicID()) {
  case Intrinsic::fabs:
  case Intrinsic::log:
  case Intrinsic::log2:
  case Intrinsic::log10:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::sqrt:
  case Intrinsic::pow:
  case Intrinsic::powi:
  case Intrinsic::bswap:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::copysign:
  case Intrinsic::round:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::convert_from_fp16:
  case Intrinsic::convert_to_fp16:
  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64:
    return true;
  default:
    return false;
  case 0: break;
  }

  if (!F->hasName())
    return false;
  StringRef Name = F->getName();

  // In these cases, the check of the length is required. We don't want to
  // return true for a name like "cos\0blah", which strcmp would consider
  // equal to "cos" even though its length is 8.
  switch (Name[0]) {
  default: return false;
  case 'a':
    return Name == "acos" || Name == "asin" || Name == "atan" ||
           Name == "atan2";
  case 'c':
    return Name == "cos" || Name == "ceil" || Name == "cosf" || Name == "cosh";
  case 'e':
    return Name == "exp" || Name == "exp2";
  case 'f':
    return Name == "fabs" || Name == "fmod" || Name == "floor";
  case 'l':
    return Name == "log" || Name == "log10";
  case 'p':
    return Name == "pow";
  case 's':
    return Name == "sin" || Name == "sinh" || Name == "sqrt" ||
           Name == "sinf" || Name == "sqrtf";
  case 't':
    return Name == "tan" || Name == "tanh";
  }
}

static Constant *GetConstantFoldFPValue(double V, Type *Ty) {
  if (Ty->isHalfTy()) {
    APFloat APF(V);
    bool unused;
    APF.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &unused);
    return ConstantFP::get(Ty->getContext(), APF);
  }
  if (Ty->isFloatTy())
    return ConstantFP::get(Ty->getContext(), APFloat((float)V));
  if (Ty->isDoubleTy())
    return ConstantFP::get(Ty->getContext(), APFloat(V));
  llvm_unreachable("Can only constant fold half/float/double");
}

static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
                                Type *Ty) {
  sys::llvm_fenv_clearexcept();
  V = NativeFP(V);
  if (sys::llvm_fenv_testexcept()) {
    sys::llvm_fenv_clearexcept();
    return 0;
  }

  return GetConstantFoldFPValue(V, Ty);
}

static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
                                      double V, double W, Type *Ty) {
  sys::llvm_fenv_clearexcept();
  V = NativeFP(V, W);
  if (sys::llvm_fenv_testexcept()) {
    sys::llvm_fenv_clearexcept();
    return 0;
  }

  return GetConstantFoldFPValue(V, Ty);
}

/// ConstantFoldConvertToInt - Attempt to fold an SSE floating point to
/// integer conversion of a constant floating point. If roundTowardZero is
/// false, the default IEEE rounding is used (toward nearest, ties to even).
/// This matches the behavior of the non-truncating SSE instructions in the
/// default rounding mode. The desired integer type Ty is used to select how
/// many bits are available for the result. Returns null if the conversion
/// cannot be performed, otherwise returns the Constant value resulting from
/// the conversion.
static Constant *ConstantFoldConvertToInt(const APFloat &Val,
                                          bool roundTowardZero, Type *Ty) {
  // All of these conversion intrinsics form an integer of at most 64 bits.
  unsigned ResultWidth = Ty->getIntegerBitWidth();
  assert(ResultWidth <= 64 &&
         "Can only constant fold conversions to 64 and 32 bit ints");

  uint64_t UIntVal;
  bool isExact = false;
  APFloat::roundingMode mode = roundTowardZero ? APFloat::rmTowardZero
                                               : APFloat::rmNearestTiesToEven;
  APFloat::opStatus status = Val.convertToInteger(&UIntVal, ResultWidth,
                                                  /*isSigned=*/true, mode,
                                                  &isExact);
  if (status != APFloat::opOK && status != APFloat::opInexact)
    return 0;
  return ConstantInt::get(Ty, UIntVal, /*isSigned=*/true);
}

static double getValueAsDouble(ConstantFP *Op) {
  Type *Ty = Op->getType();

  if (Ty->isFloatTy())
    return Op->getValueAPF().convertToFloat();

  if (Ty->isDoubleTy())
    return Op->getValueAPF().convertToDouble();

  bool unused;
  APFloat APF = Op->getValueAPF();
  APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &unused);
  return APF.convertToDouble();
}

static Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID,
                                        Type *Ty,
                                        ArrayRef<Constant *> Operands,
                                        const TargetLibraryInfo *TLI) {
  if (Operands.size() == 1) {
    if (ConstantFP *Op = dyn_cast<ConstantFP>(Operands[0])) {
      if (IntrinsicID == Intrinsic::convert_to_fp16) {
        APFloat Val(Op->getValueAPF());

        bool lost = false;
        Val.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &lost);

        return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
      }

      if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
        return 0;

      if (IntrinsicID == Intrinsic::round) {
        APFloat V = Op->getValueAPF();
        V.roundToIntegral(APFloat::rmNearestTiesToAway);
        return ConstantFP::get(Ty->getContext(), V);
      }

      /// We only fold functions with finite arguments. Folding NaN and inf is
      /// likely to be aborted with an exception anyway, and some host libms
      /// have known errors raising exceptions.
      if (Op->getValueAPF().isNaN() || Op->getValueAPF().isInfinity())
        return 0;

      /// Currently APFloat versions of these functions do not exist, so we
      /// use the host native double versions. Float versions are not called
      /// directly but for all these it is true that
      /// (float)(f((double)arg)) == f(arg). Long double is not supported yet.
      double V = getValueAsDouble(Op);

      switch (IntrinsicID) {
      default: break;
      case Intrinsic::fabs:
        return ConstantFoldFP(fabs, V, Ty);
#if HAVE_LOG2
      case Intrinsic::log2:
        return ConstantFoldFP(log2, V, Ty);
#endif
#if HAVE_LOG
      case Intrinsic::log:
        return ConstantFoldFP(log, V, Ty);
#endif
#if HAVE_LOG10
      case Intrinsic::log10:
        return ConstantFoldFP(log10, V, Ty);
#endif
#if HAVE_EXP
      case Intrinsic::exp:
        return ConstantFoldFP(exp, V, Ty);
#endif
#if HAVE_EXP2
      case Intrinsic::exp2:
        return ConstantFoldFP(exp2, V, Ty);
#endif
      case Intrinsic::floor:
        return ConstantFoldFP(floor, V, Ty);
      case Intrinsic::ceil:
        return ConstantFoldFP(ceil, V, Ty);
      }

      if (!TLI)
        return 0;

      switch (Name[0]) {
      case 'a':
        if (Name == "acos" && TLI->has(LibFunc::acos))
          return ConstantFoldFP(acos, V, Ty);
        else if (Name == "asin" && TLI->has(LibFunc::asin))
          return ConstantFoldFP(asin, V, Ty);
        else if (Name == "atan" && TLI->has(LibFunc::atan))
          return ConstantFoldFP(atan, V, Ty);
        break;
      case 'c':
        if (Name == "ceil" && TLI->has(LibFunc::ceil))
          return ConstantFoldFP(ceil, V, Ty);
        else if (Name == "cos" && TLI->has(LibFunc::cos))
          return ConstantFoldFP(cos, V, Ty);
        else if (Name == "cosh" && TLI->has(LibFunc::cosh))
          return ConstantFoldFP(cosh, V, Ty);
        else if (Name == "cosf" && TLI->has(LibFunc::cosf))
          return ConstantFoldFP(cos, V, Ty);
        break;
      case 'e':
        if (Name == "exp" && TLI->has(LibFunc::exp))
          return ConstantFoldFP(exp, V, Ty);

        if (Name == "exp2" && TLI->has(LibFunc::exp2)) {
          // Constant fold exp2(x) as pow(2, x) in case the host doesn't have
          // a C99 library.
          return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
        }
        break;
      case 'f':
        if (Name == "fabs" && TLI->has(LibFunc::fabs))
          return ConstantFoldFP(fabs, V, Ty);
        else if (Name == "floor" && TLI->has(LibFunc::floor))
          return ConstantFoldFP(floor, V, Ty);
        break;
      case 'l':
        if (Name == "log" && V > 0 && TLI->has(LibFunc::log))
          return ConstantFoldFP(log, V, Ty);
        else if (Name == "log10" && V > 0 && TLI->has(LibFunc::log10))
          return ConstantFoldFP(log10, V, Ty);
        else if (IntrinsicID == Intrinsic::sqrt &&
                 (Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy())) {
          if (V >= -0.0)
            return ConstantFoldFP(sqrt, V, Ty);
          else // Undefined
            return Constant::getNullValue(Ty);
        }
        break;
      case 's':
        if (Name == "sin" && TLI->has(LibFunc::sin))
          return ConstantFoldFP(sin, V, Ty);
        else if (Name == "sinh" && TLI->has(LibFunc::sinh))
          return ConstantFoldFP(sinh, V, Ty);
        else if (Name == "sqrt" && V >= 0 && TLI->has(LibFunc::sqrt))
          return ConstantFoldFP(sqrt, V, Ty);
        else if (Name == "sqrtf" && V >= 0 && TLI->has(LibFunc::sqrtf))
          return ConstantFoldFP(sqrt, V, Ty);
        else if (Name == "sinf" && TLI->has(LibFunc::sinf))
          return ConstantFoldFP(sin, V, Ty);
        break;
      case 't':
        if (Name == "tan" && TLI->has(LibFunc::tan))
          return ConstantFoldFP(tan, V, Ty);
        else if (Name == "tanh" && TLI->has(LibFunc::tanh))
          return ConstantFoldFP(tanh, V, Ty);
        break;
      default:
        break;
      }
      return 0;
    }

    if (ConstantInt *Op = dyn_cast<ConstantInt>(Operands[0])) {
      switch (IntrinsicID) {
      case Intrinsic::bswap:
        return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
      case Intrinsic::ctpop:
        return ConstantInt::get(Ty, Op->getValue().countPopulation());
      case Intrinsic::convert_from_fp16: {
        APFloat Val(APFloat::IEEEhalf, Op->getValue());

        bool lost = false;
        APFloat::opStatus status =
          Val.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven,
                      &lost);

        // Conversion is always precise.
        (void)status;
        assert(status == APFloat::opOK && !lost &&
               "Precision lost during fp16 constfolding");

        return ConstantFP::get(Ty->getContext(), Val);
      }
      default:
        return 0;
      }
    }

    // Support ConstantVector in case we have an Undef at the top level.
    if (isa<ConstantVector>(Operands[0]) ||
        isa<ConstantDataVector>(Operands[0])) {
      Constant *Op = cast<Constant>(Operands[0]);
      switch (IntrinsicID) {
      default: break;
      case Intrinsic::x86_sse_cvtss2si:
      case Intrinsic::x86_sse_cvtss2si64:
      case Intrinsic::x86_sse2_cvtsd2si:
      case Intrinsic::x86_sse2_cvtsd2si64:
        if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
          return ConstantFoldConvertToInt(FPOp->getValueAPF(),
                                          /*roundTowardZero=*/false, Ty);
      case Intrinsic::x86_sse_cvttss2si:
      case Intrinsic::x86_sse_cvttss2si64:
      case Intrinsic::x86_sse2_cvttsd2si:
      case Intrinsic::x86_sse2_cvttsd2si64:
        if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
          return ConstantFoldConvertToInt(FPOp->getValueAPF(),
                                          /*roundTowardZero=*/true, Ty);
      }
    }

    if (isa<UndefValue>(Operands[0])) {
      if (IntrinsicID == Intrinsic::bswap)
        return Operands[0];
      return 0;
    }

    return 0;
  }

  if (Operands.size() == 2) {
    if (ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
      if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
        return 0;
      double Op1V = getValueAsDouble(Op1);

      if (ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
        if (Op2->getType() != Op1->getType())
          return 0;

        double Op2V = getValueAsDouble(Op2);
        if (IntrinsicID == Intrinsic::pow) {
          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
        }
        if (IntrinsicID == Intrinsic::copysign) {
          APFloat V1 = Op1->getValueAPF();
          APFloat V2 = Op2->getValueAPF();
          V1.copySign(V2);
          return ConstantFP::get(Ty->getContext(), V1);
        }
        if (!TLI)
          return 0;
        if (Name == "pow" && TLI->has(LibFunc::pow))
          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
        if (Name == "fmod" && TLI->has(LibFunc::fmod))
          return ConstantFoldBinaryFP(fmod, Op1V, Op2V, Ty);
        if (Name == "atan2" && TLI->has(LibFunc::atan2))
          return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
      } else if (ConstantInt *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
        if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy())
          return ConstantFP::get(Ty->getContext(),
                                 APFloat((float)std::pow((float)Op1V,
                                                 (int)Op2C->getZExtValue())));
        if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy())
          return ConstantFP::get(Ty->getContext(),
                                 APFloat((float)std::pow((float)Op1V,
                                                 (int)Op2C->getZExtValue())));
        if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy())
          return ConstantFP::get(Ty->getContext(),
                                 APFloat((double)std::pow((double)Op1V,
                                                 (int)Op2C->getZExtValue())));
      }
      return 0;
    }

    if (ConstantInt *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
      if (ConstantInt *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
        switch (IntrinsicID) {
        default: break;
        case Intrinsic::sadd_with_overflow:
        case Intrinsic::uadd_with_overflow:
        case Intrinsic::ssub_with_overflow:
        case Intrinsic::usub_with_overflow:
        case Intrinsic::smul_with_overflow:
        case Intrinsic::umul_with_overflow: {
          APInt Res;
          bool Overflow;
          switch (IntrinsicID) {
          default: llvm_unreachable("Invalid case");
          case Intrinsic::sadd_with_overflow:
            Res = Op1->getValue().sadd_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::uadd_with_overflow:
            Res = Op1->getValue().uadd_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::ssub_with_overflow:
            Res = Op1->getValue().ssub_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::usub_with_overflow:
            Res = Op1->getValue().usub_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::smul_with_overflow:
            Res = Op1->getValue().smul_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::umul_with_overflow:
            Res = Op1->getValue().umul_ov(Op2->getValue(), Overflow);
            break;
          }
          Constant *Ops[] = {
            ConstantInt::get(Ty->getContext(), Res),
            ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)
          };
          return ConstantStruct::get(cast<StructType>(Ty), Ops);
        }
        case Intrinsic::cttz:
          if (Op2->isOne() && Op1->isZero()) // cttz(0, 1) is undef.
            return UndefValue::get(Ty);
          return ConstantInt::get(Ty, Op1->getValue().countTrailingZeros());
        case Intrinsic::ctlz:
          if (Op2->isOne() && Op1->isZero()) // ctlz(0, 1) is undef.
            return UndefValue::get(Ty);
          return ConstantInt::get(Ty, Op1->getValue().countLeadingZeros());
        }
      }

      return 0;
    }
    return 0;
  }

  if (Operands.size() != 3)
    return 0;

  if (const ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    if (const ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
      if (const ConstantFP *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
        switch (IntrinsicID) {
        default: break;
        case Intrinsic::fma:
        case Intrinsic::fmuladd: {
          APFloat V = Op1->getValueAPF();
          APFloat::opStatus s =
            V.fusedMultiplyAdd(Op2->getValueAPF(), Op3->getValueAPF(),
                               APFloat::rmNearestTiesToEven);
          if (s != APFloat::opInvalidOp)
            return ConstantFP::get(Ty->getContext(), V);

          return 0;
        }
        }
      }
    }
  }

  return 0;
}

static Constant *ConstantFoldVectorCall(StringRef Name, unsigned IntrinsicID,
                                        VectorType *VTy,
                                        ArrayRef<Constant *> Operands,
                                        const TargetLibraryInfo *TLI) {
  SmallVector<Constant *, 4> Result(VTy->getNumElements());
  SmallVector<Constant *, 4> Lane(Operands.size());
  Type *Ty = VTy->getElementType();

  for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
    // Gather a column of constants.
    for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
      Constant *Agg = Operands[J]->getAggregateElement(I);
      if (!Agg)
        return nullptr;

      Lane[J] = Agg;
    }

    // Use the regular scalar folding to simplify this column.
    Constant *Folded = ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane,
                                              TLI);
    if (!Folded)
      return nullptr;
    Result[I] = Folded;
  }

  return ConstantVector::get(Result);
}

/// ConstantFoldCall - Attempt to constant fold a call to the specified
/// function with the specified arguments, returning null if unsuccessful.
Constant *
llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
                       const TargetLibraryInfo *TLI) {
  if (!F->hasName())
    return 0;
  StringRef Name = F->getName();

  Type *Ty = F->getReturnType();

  if (VectorType *VTy = dyn_cast<VectorType>(Ty))
    return ConstantFoldVectorCall(Name, F->getIntrinsicID(), VTy, Operands,
                                  TLI);

  return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI);
}
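
// Example usage (an illustrative sketch, not part of the original file):
// a transform pass would typically call ConstantFoldInstruction and replace
// the instruction when folding succeeds. Here `I` is some Instruction*, and
// TD/TLI may be null when DataLayout or TargetLibraryInfo are unavailable.
//
//   if (Constant *C = llvm::ConstantFoldInstruction(I, TD, TLI)) {
//     I->replaceAllUsesWith(C);
//     I->eraseFromParent();
//   }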