ConstantFolding.cpp revision 4c5e43da7792f75567b693105cc53e3f1992ad98
//===-- ConstantFolding.cpp - Fold instructions into constants ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines routines for folding instructions into constants.
//
// Also, to supplement the basic IR ConstantExpr simplifications,
// this file defines some additional folding routines that can make use of
// DataLayout information. These functions cannot go in IR due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/config.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <cerrno>
#include <cmath>

#ifdef HAVE_FENV_H
#include <fenv.h>
#endif

using namespace llvm;

//===----------------------------------------------------------------------===//
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//

/// Constant fold bitcast, symbolically evaluating it with DataLayout.
/// This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
static Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
  // Catch the obvious splat cases.
  if (C->isNullValue() && !DestTy->isX86_MMXTy())
    return Constant::getNullValue(DestTy);
  if (C->isAllOnesValue() && !DestTy->isX86_MMXTy() &&
      !DestTy->isPtrOrPtrVectorTy()) // Don't get ones for ptr types!
    return Constant::getAllOnesValue(DestTy);

  // Handle a vector->integer cast.
  if (IntegerType *IT = dyn_cast<IntegerType>(DestTy)) {
    VectorType *VTy = dyn_cast<VectorType>(C->getType());
    if (!VTy)
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned NumSrcElts = VTy->getNumElements();
    Type *SrcEltTy = VTy->getElementType();

    // If the vector is a vector of floating point, convert it to a vector of
    // integers to simplify things.
    if (SrcEltTy->isFloatingPointTy()) {
      unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
      Type *SrcIVTy =
        VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElts);
      // Ask IR to do the conversion now that #elts line up.
      C = ConstantExpr::getBitCast(C, SrcIVTy);
    }

    ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(C);
    if (!CDV)
      return ConstantExpr::getBitCast(C, DestTy);

    // Now that we know that the input value is a vector of integers, just
    // shift and insert them into our result.
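    // Worked example of the loop below (illustrative, little-endian target):
    //   bitcast (<2 x i16> <i16 1, i16 2> to i32)
    // folds to i32 0x00020001 -- element 0 lands in the low bits.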
    unsigned BitShift = DL.getTypeAllocSizeInBits(SrcEltTy);
    APInt Result(IT->getBitWidth(), 0);
    for (unsigned i = 0; i != NumSrcElts; ++i) {
      Result <<= BitShift;
      if (DL.isLittleEndian())
        Result |= CDV->getElementAsInteger(NumSrcElts-i-1);
      else
        Result |= CDV->getElementAsInteger(i);
    }

    return ConstantInt::get(IT, Result);
  }

  // The code below only handles casts to vectors currently.
  VectorType *DestVTy = dyn_cast<VectorType>(DestTy);
  if (!DestVTy)
    return ConstantExpr::getBitCast(C, DestTy);

  // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
  // vector so the code below can handle it uniformly.
  if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
    Constant *Ops = C; // don't take the address of C!
    return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
  }

  // If this is a bitcast from constant vector -> vector, fold it.
  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
    return ConstantExpr::getBitCast(C, DestTy);

  // If the element types match, IR can fold it.
  unsigned NumDstElt = DestVTy->getNumElements();
  unsigned NumSrcElt = C->getType()->getVectorNumElements();
  if (NumDstElt == NumSrcElt)
    return ConstantExpr::getBitCast(C, DestTy);

  Type *SrcEltTy = C->getType()->getVectorElementType();
  Type *DstEltTy = DestVTy->getElementType();

  // Otherwise, we're changing the number of elements in a vector, which
  // requires endianness information to do the right thing. For example,
  //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  // folds to (little endian):
  //    <4 x i32> <i32 0, i32 0, i32 1, i32 0>
  // and to (big endian):
  //    <4 x i32> <i32 0, i32 0, i32 0, i32 1>

  // First things first: we only want to think about integers here, so if
  // we have something in FP form, recast it as integer.
  if (DstEltTy->isFloatingPointTy()) {
    // Fold to a vector of integers with the same size as our FP type.
    unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
    Type *DestIVTy =
      VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
    // Recursively handle this integer conversion, if possible.
    C = FoldBitCast(C, DestIVTy, DL);

    // Finally, IR can handle this now that #elts line up.
    return ConstantExpr::getBitCast(C, DestTy);
  }

  // Okay, we know the destination is integer, if the input is FP, convert
  // it to integer first.
  if (SrcEltTy->isFloatingPointTy()) {
    unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
    Type *SrcIVTy =
      VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
    // Ask IR to do the conversion now that #elts line up.
    C = ConstantExpr::getBitCast(C, SrcIVTy);
    // If IR wasn't able to fold it, bail out.
    if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
        !isa<ConstantDataVector>(C))
      return C;
  }

  // Now we know that the input and output vectors are both integer vectors
  // of the same size, and that their #elements is not the same. Do the
  // conversion here, which depends on whether the input or output has
  // more elements.
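  // For example (illustrative), on a little-endian target
  //   bitcast (<4 x i32> <i32 1, i32 2, i32 3, i32 4> to <2 x i64>)
  // folds to <2 x i64> <i64 0x0000000200000001, i64 0x0000000400000003>.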
  bool isLittleEndian = DL.isLittleEndian();

  SmallVector<Constant*, 32> Result;
  if (NumDstElt < NumSrcElt) {
    // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
    Constant *Zero = Constant::getNullValue(DstEltTy);
    unsigned Ratio = NumSrcElt/NumDstElt;
    unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
    unsigned SrcElt = 0;
    for (unsigned i = 0; i != NumDstElt; ++i) {
      // Build each element of the result.
      Constant *Elt = Zero;
      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src = dyn_cast<ConstantInt>(C->getAggregateElement(SrcElt++));
        if (!Src)  // Reject constantexpr elements.
          return ConstantExpr::getBitCast(C, DestTy);

        // Zero extend the element to the right size.
        Src = ConstantExpr::getZExt(Src, Elt->getType());

        // Shift it to the right place, depending on endianness.
        Src = ConstantExpr::getShl(Src,
                                   ConstantInt::get(Src->getType(), ShiftAmt));
        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;

        // Mix it in.
        Elt = ConstantExpr::getOr(Elt, Src);
      }
      Result.push_back(Elt);
    }
    return ConstantVector::get(Result);
  }

  // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  unsigned Ratio = NumDstElt/NumSrcElt;
  unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);

  // Loop over each source value, expanding into multiple results.
  for (unsigned i = 0; i != NumSrcElt; ++i) {
    Constant *Src = dyn_cast<ConstantInt>(C->getAggregateElement(i));
    if (!Src)  // Reject constantexpr elements.
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
    for (unsigned j = 0; j != Ratio; ++j) {
      // Shift the piece of the value into the right place, depending on
      // endianness.
      Constant *Elt = ConstantExpr::getLShr(Src,
                                  ConstantInt::get(Src->getType(), ShiftAmt));
      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;

      // Truncate the element to an integer with the same pointer size and
      // convert the element back to a pointer using an inttoptr.
      if (DstEltTy->isPointerTy()) {
        IntegerType *DstIntTy = Type::getIntNTy(C->getContext(), DstBitSize);
        Constant *CE = ConstantExpr::getTrunc(Elt, DstIntTy);
        Result.push_back(ConstantExpr::getIntToPtr(CE, DstEltTy));
        continue;
      }

      // Truncate and remember this piece.
      Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
    }
  }

  return ConstantVector::get(Result);
}


/// If this constant is a constant offset from a global, return the global and
/// the constant. Because of constantexprs, this function is recursive.
static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
                                       APInt &Offset, const DataLayout &DL) {
  // Trivial case, constant is the global.
  if ((GV = dyn_cast<GlobalValue>(C))) {
    unsigned BitWidth = DL.getPointerTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  // Otherwise, if this isn't a constant expr, bail out.
  ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
  if (!CE) return false;

  // Look through ptr->int and ptr->ptr casts.
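  // For instance (illustrative), ptrtoint (i8* @g to i64) and
  // bitcast (i8* @g to i32*) both resolve to @g with offset 0.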
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::BitCast ||
      CE->getOpcode() == Instruction::AddrSpaceCast)
    return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL);

  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
  GEPOperator *GEP = dyn_cast<GEPOperator>(CE);
  if (!GEP)
    return false;

  unsigned BitWidth = DL.getPointerTypeSizeInBits(GEP->getType());
  APInt TmpOffset(BitWidth, 0);

  // If the base isn't a global+constant, we aren't either.
  if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL))
    return false;

  // Otherwise, add any offset that our operands provide.
  if (!GEP->accumulateConstantOffset(DL, TmpOffset))
    return false;

  Offset = TmpOffset;
  return true;
}

/// Recursive helper to read bits out of a global. C is the constant being
/// copied out of. ByteOffset is an offset into C. CurPtr is the pointer to
/// copy results into and BytesLeft is the number of bytes left in
/// the CurPtr buffer. DL is the DataLayout.
static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
                               unsigned char *CurPtr, unsigned BytesLeft,
                               const DataLayout &DL) {
  assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
         "Out of range access");

  // If this element is zero or undefined, we can just return since *CurPtr is
  // zero initialized.
  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
    return true;

  if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
    if (CI->getBitWidth() > 64 ||
        (CI->getBitWidth() & 7) != 0)
      return false;

    uint64_t Val = CI->getZExtValue();
    unsigned IntBytes = unsigned(CI->getBitWidth()/8);

    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
      int n = ByteOffset;
      if (!DL.isLittleEndian())
        n = IntBytes - n - 1;
      CurPtr[i] = (unsigned char)(Val >> (n * 8));
      ++ByteOffset;
    }
    return true;
  }

  if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->getType()->isDoubleTy()) {
      C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isFloatTy()) {
      C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isHalfTy()) {
      C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    return false;
  }

  if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C)) {
    const StructLayout *SL = DL.getStructLayout(CS->getType());
    unsigned Index = SL->getElementContainingOffset(ByteOffset);
    uint64_t CurEltOffset = SL->getElementOffset(Index);
    ByteOffset -= CurEltOffset;

    while (1) {
      // If the element access is to the element itself and not to tail
      // padding, read the bytes from the element.
      uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());

      if (ByteOffset < EltSize &&
          !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
                              BytesLeft, DL))
        return false;

      ++Index;

      // Check to see if we read from the last struct element; if so, we're
      // done.
      if (Index == CS->getType()->getNumElements())
        return true;

      // If we read all of the bytes we needed from this element we're done.
      uint64_t NextEltOffset = SL->getElementOffset(Index);

      if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
        return true;

      // Move to the next element of the struct.
      CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
      BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
      ByteOffset = 0;
      CurEltOffset = NextEltOffset;
    }
    // not reached.
  }

  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
      isa<ConstantDataSequential>(C)) {
    Type *EltTy = C->getType()->getSequentialElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    uint64_t Index = ByteOffset / EltSize;
    uint64_t Offset = ByteOffset - Index * EltSize;
    uint64_t NumElts;
    if (ArrayType *AT = dyn_cast<ArrayType>(C->getType()))
      NumElts = AT->getNumElements();
    else
      NumElts = C->getType()->getVectorNumElements();

    for (; Index != NumElts; ++Index) {
      if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
                              BytesLeft, DL))
        return false;

      uint64_t BytesWritten = EltSize - Offset;
      assert(BytesWritten <= EltSize && "Not indexing into this element?");
      if (BytesWritten >= BytesLeft)
        return true;

      Offset = 0;
      BytesLeft -= BytesWritten;
      CurPtr += BytesWritten;
    }
    return true;
  }

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->getOpcode() == Instruction::IntToPtr &&
        CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
                                BytesLeft, DL);
    }
  }

  // Otherwise, unknown initializer type.
  return false;
}

static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
                                                 const DataLayout &DL) {
  PointerType *PTy = cast<PointerType>(C->getType());
  Type *LoadTy = PTy->getElementType();
  IntegerType *IntType = dyn_cast<IntegerType>(LoadTy);

  // If this isn't an integer load we can't fold it directly.
  if (!IntType) {
    unsigned AS = PTy->getAddressSpace();

    // If this is a float/double load, we can try folding it as an int32/64
    // load and then bitcast the result. This can be useful for union cases.
    // Note that address spaces don't matter here since this won't result in
    // an actual new load.
    Type *MapTy;
    if (LoadTy->isHalfTy())
      MapTy = Type::getInt16PtrTy(C->getContext(), AS);
    else if (LoadTy->isFloatTy())
      MapTy = Type::getInt32PtrTy(C->getContext(), AS);
    else if (LoadTy->isDoubleTy())
      MapTy = Type::getInt64PtrTy(C->getContext(), AS);
    else if (LoadTy->isVectorTy()) {
      MapTy = PointerType::getIntNPtrTy(C->getContext(),
                                        DL.getTypeAllocSizeInBits(LoadTy), AS);
    } else
      return nullptr;

    C = FoldBitCast(C, MapTy, DL);
    if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, DL))
      return FoldBitCast(Res, LoadTy, DL);
    return nullptr;
  }

  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
  if (BytesLoaded > 32 || BytesLoaded == 0)
    return nullptr;

  GlobalValue *GVal;
  APInt Offset;
  if (!IsConstantOffsetFromGlobal(C, GVal, Offset, DL))
    return nullptr;

  GlobalVariable *GV = dyn_cast<GlobalVariable>(GVal);
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      !GV->getInitializer()->getType()->isSized())
    return nullptr;

  // If we're loading off the beginning of the global, some bytes may be valid,
  // but we don't try to handle this.
  if (Offset.isNegative())
    return nullptr;

  // If we're not accessing anything in this constant, the result is undefined.
  if (Offset.getZExtValue() >=
      DL.getTypeAllocSize(GV->getInitializer()->getType()))
    return UndefValue::get(IntType);

  unsigned char RawBytes[32] = {0};
  if (!ReadDataFromGlobal(GV->getInitializer(), Offset.getZExtValue(), RawBytes,
                          BytesLoaded, DL))
    return nullptr;

  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
  if (DL.isLittleEndian()) {
    ResultVal = RawBytes[BytesLoaded - 1];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[BytesLoaded - 1 - i];
    }
  } else {
    ResultVal = RawBytes[0];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[i];
    }
  }

  return ConstantInt::get(IntType->getContext(), ResultVal);
}

static Constant *ConstantFoldLoadThroughBitcast(ConstantExpr *CE,
                                                const DataLayout &DL) {
  auto *DestPtrTy = dyn_cast<PointerType>(CE->getType());
  if (!DestPtrTy)
    return nullptr;
  Type *DestTy = DestPtrTy->getElementType();

  Constant *C = ConstantFoldLoadFromConstPtr(CE->getOperand(0), DL);
  if (!C)
    return nullptr;

  do {
    Type *SrcTy = C->getType();

    // If the type sizes are the same and a cast is legal, just directly
    // cast the constant.
    if (DL.getTypeSizeInBits(DestTy) == DL.getTypeSizeInBits(SrcTy)) {
      Instruction::CastOps Cast = Instruction::BitCast;
      // If we are going from a pointer to int or vice versa, we spell the cast
      // differently.
      if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
        Cast = Instruction::IntToPtr;
      else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
        Cast = Instruction::PtrToInt;

      if (CastInst::castIsValid(Cast, C, DestTy))
        return ConstantExpr::getCast(Cast, C, DestTy);
    }

    // If this isn't an aggregate type, there is nothing we can do to drill
    // down and find a bitcastable constant.
    if (!SrcTy->isAggregateType())
      return nullptr;

    // We're simulating a load through a pointer that was bitcast to point to
    // a different type, so we can try to walk down through the initial
    // elements of an aggregate to see if some part of the aggregate is
    // castable to implement the "load" semantic model.
    C = C->getAggregateElement(0u);
  } while (C);

  return nullptr;
}

/// Return the value that a load from C would produce if it is constant and
/// determinable. If this is not determinable, return null.
Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
                                             const DataLayout &DL) {
  // First, try the easy cases:
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      return GV->getInitializer();

  // If the loaded value isn't a constant expr, we can't handle it.
  ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
  if (!CE)
    return nullptr;

  if (CE->getOpcode() == Instruction::GetElementPtr) {
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
      if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
        if (Constant *V =
             ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE))
          return V;
      }
    }
  }

  if (CE->getOpcode() == Instruction::BitCast)
    if (Constant *LoadedC = ConstantFoldLoadThroughBitcast(CE, DL))
      return LoadedC;

  // Instead of loading a constant C string, use the corresponding integer
  // value directly if the string length is small enough.
  StringRef Str;
  if (getConstantStringInfo(CE, Str) && !Str.empty()) {
    unsigned StrLen = Str.size();
    Type *Ty = cast<PointerType>(CE->getType())->getElementType();
    unsigned NumBits = Ty->getPrimitiveSizeInBits();
    // Replace load with immediate integer if the result is an integer or fp
    // value.
    if ((NumBits >> 3) == StrLen + 1 && (NumBits & 7) == 0 &&
        (isa<IntegerType>(Ty) || Ty->isFloatingPointTy())) {
      APInt StrVal(NumBits, 0);
      APInt SingleChar(NumBits, 0);
      if (DL.isLittleEndian()) {
        for (signed i = StrLen-1; i >= 0; i--) {
          SingleChar = (uint64_t) Str[i] & UCHAR_MAX;
          StrVal = (StrVal << 8) | SingleChar;
        }
      } else {
        for (unsigned i = 0; i < StrLen; i++) {
          SingleChar = (uint64_t) Str[i] & UCHAR_MAX;
          StrVal = (StrVal << 8) | SingleChar;
        }
        // Append NUL at the end.
        SingleChar = 0;
        StrVal = (StrVal << 8) | SingleChar;
      }

      Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
      if (Ty->isFloatingPointTy())
        Res = ConstantExpr::getBitCast(Res, Ty);
      return Res;
    }
  }

  // If this load comes from anywhere in a constant global, and if the global
  // is all undef or zero, we know what it loads.
  if (GlobalVariable *GV =
          dyn_cast<GlobalVariable>(GetUnderlyingObject(CE, DL))) {
    if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
      Type *ResTy = cast<PointerType>(C->getType())->getElementType();
      if (GV->getInitializer()->isNullValue())
        return Constant::getNullValue(ResTy);
      if (isa<UndefValue>(GV->getInitializer()))
        return UndefValue::get(ResTy);
    }
  }

  // Try hard to fold loads from bitcasted strange and non-type-safe things.
  return FoldReinterpretLoadFromConstPtr(CE, DL);
}

static Constant *ConstantFoldLoadInst(const LoadInst *LI,
                                      const DataLayout &DL) {
  if (LI->isVolatile()) return nullptr;

  if (Constant *C = dyn_cast<Constant>(LI->getOperand(0)))
    return ConstantFoldLoadFromConstPtr(C, DL);

  return nullptr;
}

/// One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
/// these together, using the provided DataLayout.
static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
                                           Constant *Op1,
                                           const DataLayout &DL) {
  // SROA

  // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
  // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
  // bits.
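  // e.g. (illustrative) and i64 (zext i32 X to i64), 0xFFFFFFFF folds to the
  // zext operand itself: the high bits are known zero, so the mask cannot
  // clear anything.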

  if (Opc == Instruction::And) {
    unsigned BitWidth = DL.getTypeSizeInBits(Op0->getType()->getScalarType());
    APInt KnownZero0(BitWidth, 0), KnownOne0(BitWidth, 0);
    APInt KnownZero1(BitWidth, 0), KnownOne1(BitWidth, 0);
    computeKnownBits(Op0, KnownZero0, KnownOne0, DL);
    computeKnownBits(Op1, KnownZero1, KnownOne1, DL);
    if ((KnownOne1 | KnownZero0).isAllOnesValue()) {
      // All the bits of Op0 that the 'and' could be masking are already zero.
      return Op0;
    }
    if ((KnownOne0 | KnownZero1).isAllOnesValue()) {
      // All the bits of Op1 that the 'and' could be masking are already zero.
      return Op1;
    }

    APInt KnownZero = KnownZero0 | KnownZero1;
    APInt KnownOne = KnownOne0 & KnownOne1;
    if ((KnownZero | KnownOne).isAllOnesValue()) {
      return ConstantInt::get(Op0->getType(), KnownOne);
    }
  }

  // If the constant expr is something like &A[123] - &A[4].f, fold this into a
  // constant. This happens frequently when iterating over a global array.
  if (Opc == Instruction::Sub) {
    GlobalValue *GV1, *GV2;
    APInt Offs1, Offs2;

    if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL))
      if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) {
        unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());

        // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
        // PtrToInt may change the bitwidth so we have to convert to the right
        // size first.
        return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
                                                Offs2.zextOrTrunc(OpSize));
      }
  }

  return nullptr;
}

/// If array indices are not pointer-sized integers, explicitly cast them so
/// that they aren't implicitly casted by the getelementptr.
static Constant *CastGEPIndices(ArrayRef<Constant *> Ops, Type *ResultTy,
                                const DataLayout &DL,
                                const TargetLibraryInfo *TLI) {
  Type *IntPtrTy = DL.getIntPtrType(ResultTy);

  bool Any = false;
  SmallVector<Constant*, 32> NewIdxs;
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    if ((i == 1 ||
         !isa<StructType>(GetElementPtrInst::getIndexedType(
             Ops[0]->getType(),
             Ops.slice(1, i - 1)))) &&
        Ops[i]->getType() != IntPtrTy) {
      Any = true;
      NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
                                                                      true,
                                                                      IntPtrTy,
                                                                      true),
                                              Ops[i], IntPtrTy));
    } else
      NewIdxs.push_back(Ops[i]);
  }

  if (!Any)
    return nullptr;

  Constant *C = ConstantExpr::getGetElementPtr(Ops[0], NewIdxs);
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
    if (Constant *Folded = ConstantFoldConstantExpression(CE, DL, TLI))
      C = Folded;
  }

  return C;
}

/// Strip the pointer casts, but preserve the address space information.
static Constant* StripPtrCastKeepAS(Constant* Ptr) {
  assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
  PointerType *OldPtrTy = cast<PointerType>(Ptr->getType());
  Ptr = Ptr->stripPointerCasts();
  PointerType *NewPtrTy = cast<PointerType>(Ptr->getType());

  // Preserve the address space number of the pointer.
  if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) {
    NewPtrTy = NewPtrTy->getElementType()->getPointerTo(
        OldPtrTy->getAddressSpace());
    Ptr = ConstantExpr::getPointerCast(Ptr, NewPtrTy);
  }
  return Ptr;
}

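// Worked example (illustrative): on a target with 64-bit pointers, the
// "offsetof"-style expression
//   getelementptr ({ i32, i32 }* null, i64 0, i32 1)
// is folded by SymbolicallyEvaluateGEP below to inttoptr (i64 4 to i32*).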
/// If we can symbolically evaluate the GEP constant expression, do so.
static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
                                         Type *ResultTy, const DataLayout &DL,
                                         const TargetLibraryInfo *TLI) {
  Constant *Ptr = Ops[0];
  if (!Ptr->getType()->getPointerElementType()->isSized() ||
      !Ptr->getType()->isPointerTy())
    return nullptr;

  Type *IntPtrTy = DL.getIntPtrType(Ptr->getType());
  Type *ResultElementTy = ResultTy->getPointerElementType();

  // If this is a constant expr gep that is effectively computing an
  // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'.
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    if (!isa<ConstantInt>(Ops[i])) {

      // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
      // "inttoptr (sub (ptrtoint Ptr), V)"
      if (Ops.size() == 2 && ResultElementTy->isIntegerTy(8)) {
        ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[1]);
        assert((!CE || CE->getType() == IntPtrTy) &&
               "CastGEPIndices didn't canonicalize index types!");
        if (CE && CE->getOpcode() == Instruction::Sub &&
            CE->getOperand(0)->isNullValue()) {
          Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
          Res = ConstantExpr::getSub(Res, CE->getOperand(1));
          Res = ConstantExpr::getIntToPtr(Res, ResultTy);
          if (ConstantExpr *ResCE = dyn_cast<ConstantExpr>(Res))
            Res = ConstantFoldConstantExpression(ResCE, DL, TLI);
          return Res;
        }
      }
      return nullptr;
    }

  unsigned BitWidth = DL.getTypeSizeInBits(IntPtrTy);
  APInt Offset =
      APInt(BitWidth,
            DL.getIndexedOffset(
                Ptr->getType(),
                makeArrayRef((Value * const *)Ops.data() + 1, Ops.size() - 1)));
  Ptr = StripPtrCastKeepAS(Ptr);

  // If this is a GEP of a GEP, fold it all into a single GEP.
  while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
    SmallVector<Value *, 4> NestedOps(GEP->op_begin() + 1, GEP->op_end());

    // Do not try to incorporate the sub-GEP if some index is not a number.
    bool AllConstantInt = true;
    for (unsigned i = 0, e = NestedOps.size(); i != e; ++i)
      if (!isa<ConstantInt>(NestedOps[i])) {
        AllConstantInt = false;
        break;
      }
    if (!AllConstantInt)
      break;

    Ptr = cast<Constant>(GEP->getOperand(0));
    Offset += APInt(BitWidth, DL.getIndexedOffset(Ptr->getType(), NestedOps));
    Ptr = StripPtrCastKeepAS(Ptr);
  }

  // If the base value for this address is a literal integer value, fold the
  // getelementptr to the resulting integer value cast to the pointer type.
  APInt BasePtr(BitWidth, 0);
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) {
    if (CE->getOpcode() == Instruction::IntToPtr) {
      if (ConstantInt *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
        BasePtr = Base->getValue().zextOrTrunc(BitWidth);
    }
  }

  if (Ptr->isNullValue() || BasePtr != 0) {
    Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr);
    return ConstantExpr::getIntToPtr(C, ResultTy);
  }

  // Otherwise form a regular getelementptr. Recompute the indices so that
  // we eliminate over-indexing of the notional static type array bounds.
  // This makes it easy to determine if the getelementptr is "inbounds".
  // Also, this helps GlobalOpt do SROA on GlobalVariables.
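  // e.g. (illustrative) gep (i32* gep ([8 x i32]* @a, i64 0, i64 2), i64 3)
  // has been accumulated above into @a plus a byte offset of 20, which the
  // loop below re-expresses as gep ([8 x i32]* @a, i64 0, i64 5).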
  Type *Ty = Ptr->getType();
  assert(Ty->isPointerTy() && "Forming regular GEP of non-pointer type");
  SmallVector<Constant *, 32> NewIdxs;

  do {
    if (SequentialType *ATy = dyn_cast<SequentialType>(Ty)) {
      if (ATy->isPointerTy()) {
        // The only pointer indexing we'll do is on the first index of the GEP.
        if (!NewIdxs.empty())
          break;

        // Only handle pointers to sized types, not pointers to functions.
        if (!ATy->getElementType()->isSized())
          return nullptr;
      }

      // Determine which element of the array the offset points into.
      APInt ElemSize(BitWidth, DL.getTypeAllocSize(ATy->getElementType()));
      if (ElemSize == 0)
        // The element size is 0. This may be [0 x Ty]*, so just use a zero
        // index for this level and proceed to the next level to see if it can
        // accommodate the offset.
        NewIdxs.push_back(ConstantInt::get(IntPtrTy, 0));
      else {
        // The element size is non-zero; divide the offset by the element
        // size (rounding down) to compute the index at this level.
        APInt NewIdx = Offset.udiv(ElemSize);
        Offset -= NewIdx * ElemSize;
        NewIdxs.push_back(ConstantInt::get(IntPtrTy, NewIdx));
      }
      Ty = ATy->getElementType();
    } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
      // If we end up with an offset that isn't valid for this struct type, we
      // can't re-form this GEP in a regular form, so bail out. The pointer
      // operand likely went through casts that are necessary to make the GEP
      // sensible.
      const StructLayout &SL = *DL.getStructLayout(STy);
      if (Offset.uge(SL.getSizeInBytes()))
        break;

      // Determine which field of the struct the offset points into. The
      // getZExtValue is fine as we've already ensured that the offset is
      // within the range representable by the StructLayout API.
      unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue());
      NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
                                         ElIdx));
      Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx));
      Ty = STy->getTypeAtIndex(ElIdx);
    } else {
      // We've reached some non-indexable type.
      break;
    }
  } while (Ty != ResultElementTy);

  // If we haven't used up the entire offset by descending the static
  // type, then the offset is pointing into the middle of an indivisible
  // member, so we can't simplify it.
  if (Offset != 0)
    return nullptr;

  // Create a GEP.
  Constant *C = ConstantExpr::getGetElementPtr(Ptr, NewIdxs);
  assert(C->getType()->getPointerElementType() == Ty &&
         "Computed GetElementPtr has unexpected type!");

  // If we ended up indexing a member with a type that doesn't match
  // the type of what the original indices indexed, add a cast.
  if (Ty != ResultElementTy)
    C = FoldBitCast(C, ResultTy, DL);

  return C;
}


//===----------------------------------------------------------------------===//
// Constant Folding public APIs
//===----------------------------------------------------------------------===//
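
// Typical use from a transformation pass (an illustrative sketch, not code
// from this file):
//   if (Constant *C = ConstantFoldInstruction(I, DL, TLI)) {
//     I->replaceAllUsesWith(C);
//     I->eraseFromParent();
//   }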

/// Try to constant fold the specified instruction.
/// If successful, the constant result is returned; if not, null is returned.
/// Note that this fails if not all of the operands are constant. Otherwise,
/// this function can only fail when attempting to fold instructions like loads
/// and stores, which have no constant expression form.
Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
                                        const TargetLibraryInfo *TLI) {
  // Handle PHI nodes quickly here...
  if (PHINode *PN = dyn_cast<PHINode>(I)) {
    Constant *CommonValue = nullptr;

    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *Incoming = PN->getIncomingValue(i);
      // If the incoming value is undef then skip it. Note that while we could
      // skip the value if it is equal to the phi node itself, we choose not
      // to, because that would break the rule that constant folding only
      // applies if all operands are constants.
      if (isa<UndefValue>(Incoming))
        continue;
      // If the incoming value is not a constant, then give up.
      Constant *C = dyn_cast<Constant>(Incoming);
      if (!C)
        return nullptr;
      // Fold the PHI's operands.
      if (ConstantExpr *NewC = dyn_cast<ConstantExpr>(C))
        C = ConstantFoldConstantExpression(NewC, DL, TLI);
      // If the incoming value is a different constant from the one we saw
      // previously, then give up.
      if (CommonValue && C != CommonValue)
        return nullptr;
      CommonValue = C;
    }

    // If we reach here, all incoming values are the same constant or undef.
    return CommonValue ? CommonValue : UndefValue::get(PN->getType());
  }

  // Scan the operand list, checking to see if they are all constants; if so,
  // hand off to ConstantFoldInstOperands.
  SmallVector<Constant*, 8> Ops;
  for (User::op_iterator i = I->op_begin(), e = I->op_end(); i != e; ++i) {
    Constant *Op = dyn_cast<Constant>(*i);
    if (!Op)
      return nullptr;  // Not all operands are constant!

    // Fold the Instruction's operands.
    if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(Op))
      Op = ConstantFoldConstantExpression(NewCE, DL, TLI);

    Ops.push_back(Op);
  }

  if (const CmpInst *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
                                           DL, TLI);

  if (const LoadInst *LI = dyn_cast<LoadInst>(I))
    return ConstantFoldLoadInst(LI, DL);

  if (InsertValueInst *IVI = dyn_cast<InsertValueInst>(I)) {
    return ConstantExpr::getInsertValue(
        cast<Constant>(IVI->getAggregateOperand()),
        cast<Constant>(IVI->getInsertedValueOperand()),
        IVI->getIndices());
  }

  if (ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I)) {
    return ConstantExpr::getExtractValue(
        cast<Constant>(EVI->getAggregateOperand()),
        EVI->getIndices());
  }

  return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Ops, DL, TLI);
}

static Constant *
ConstantFoldConstantExpressionImpl(const ConstantExpr *CE, const DataLayout &DL,
                                   const TargetLibraryInfo *TLI,
                                   SmallPtrSetImpl<ConstantExpr *> &FoldedOps) {
  SmallVector<Constant *, 8> Ops;
  for (User::const_op_iterator i = CE->op_begin(), e = CE->op_end(); i != e;
       ++i) {
    Constant *NewC = cast<Constant>(*i);
    // Recursively fold the ConstantExpr's operands. If we have already folded
    // a ConstantExpr, we don't have to process it again.
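    // (FoldedOps memoizes visited expressions, so a subexpression shared
    // across the DAG is only folded once.)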
    if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(NewC)) {
      if (FoldedOps.insert(NewCE).second)
        NewC = ConstantFoldConstantExpressionImpl(NewCE, DL, TLI, FoldedOps);
    }
    Ops.push_back(NewC);
  }

  if (CE->isCompare())
    return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1],
                                           DL, TLI);
  return ConstantFoldInstOperands(CE->getOpcode(), CE->getType(), Ops, DL, TLI);
}

/// Attempt to fold the constant expression
/// using the specified DataLayout. If successful, the constant result is
/// returned; if not, null is returned.
Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE,
                                               const DataLayout &DL,
                                               const TargetLibraryInfo *TLI) {
  SmallPtrSet<ConstantExpr *, 4> FoldedOps;
  return ConstantFoldConstantExpressionImpl(CE, DL, TLI, FoldedOps);
}

/// Attempt to constant fold an instruction with the
/// specified opcode and operands. If successful, the constant result is
/// returned; if not, null is returned. Note that this function can fail when
/// attempting to fold instructions like loads and stores, which have no
/// constant expression form.
///
/// TODO: This function neither utilizes nor preserves nsw/nuw/inbounds/etc
/// information, due to only being passed an opcode and operands. Constant
/// folding using this function strips this information.
///
Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
                                         ArrayRef<Constant *> Ops,
                                         const DataLayout &DL,
                                         const TargetLibraryInfo *TLI) {
  // Handle easy binops first.
  if (Instruction::isBinaryOp(Opcode)) {
    if (isa<ConstantExpr>(Ops[0]) || isa<ConstantExpr>(Ops[1])) {
      if (Constant *C = SymbolicallyEvaluateBinop(Opcode, Ops[0], Ops[1], DL))
        return C;
    }

    return ConstantExpr::get(Opcode, Ops[0], Ops[1]);
  }

  switch (Opcode) {
  default: return nullptr;
  case Instruction::ICmp:
  case Instruction::FCmp: llvm_unreachable("Invalid for compares");
  case Instruction::Call:
    if (Function *F = dyn_cast<Function>(Ops.back()))
      if (canConstantFoldCallTo(F))
        return ConstantFoldCall(F, Ops.slice(0, Ops.size() - 1), TLI);
    return nullptr;
  case Instruction::PtrToInt:
    // If the input is an inttoptr, eliminate the pair. This requires knowing
    // the width of a pointer, so it can't be done in ConstantExpr::getCast.
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0])) {
      if (CE->getOpcode() == Instruction::IntToPtr) {
        Constant *Input = CE->getOperand(0);
        unsigned InWidth = Input->getType()->getScalarSizeInBits();
        unsigned PtrWidth = DL.getPointerTypeSizeInBits(CE->getType());
        if (PtrWidth < InWidth) {
          Constant *Mask =
              ConstantInt::get(CE->getContext(),
                               APInt::getLowBitsSet(InWidth, PtrWidth));
          Input = ConstantExpr::getAnd(Input, Mask);
        }
        // Do a zext or trunc to get to the dest size.
        return ConstantExpr::getIntegerCast(Input, DestTy, false);
      }
    }
    return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
  case Instruction::IntToPtr:
    // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
    // the int size is >= the ptr size and the address spaces are the same.
    // This requires knowing the width of a pointer, so it can't be done in
    // ConstantExpr::getCast.
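    // e.g. (illustrative) inttoptr (i64 ptrtoint (i32* @g to i64) to i8*)
    // folds to bitcast (i32* @g to i8*) on a target with 64-bit pointers.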
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0])) {
      if (CE->getOpcode() == Instruction::PtrToInt) {
        Constant *SrcPtr = CE->getOperand(0);
        unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
        unsigned MidIntSize = CE->getType()->getScalarSizeInBits();

        if (MidIntSize >= SrcPtrSize) {
          unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
          if (SrcAS == DestTy->getPointerAddressSpace())
            return FoldBitCast(CE->getOperand(0), DestTy, DL);
        }
      }
    }

    return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
  case Instruction::BitCast:
    return FoldBitCast(Ops[0], DestTy, DL);
  case Instruction::Select:
    return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]);
  case Instruction::ExtractElement:
    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
  case Instruction::InsertElement:
    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]);
  case Instruction::GetElementPtr:
    if (Constant *C = CastGEPIndices(Ops, DestTy, DL, TLI))
      return C;
    if (Constant *C = SymbolicallyEvaluateGEP(Ops, DestTy, DL, TLI))
      return C;

    return ConstantExpr::getGetElementPtr(Ops[0], Ops.slice(1));
  }
}

/// Attempt to constant fold a compare
/// instruction (icmp/fcmp) with the specified operands. If it fails, it
/// returns a constant expression of the specified operands.
Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
                                                Constant *Ops0, Constant *Ops1,
                                                const DataLayout &DL,
                                                const TargetLibraryInfo *TLI) {
  // fold: icmp (inttoptr x), null         -> icmp x, 0
  // fold: icmp (ptrtoint x), 0            -> icmp x, null
  // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
  // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
  //
  // FIXME: The following comment is out of date and the DataLayout is here now.
  // ConstantExpr::getCompare cannot do this, because it doesn't have DL
  // around to know if bit truncation is happening.
  if (ConstantExpr *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
    if (Ops1->isNullValue()) {
      if (CE0->getOpcode() == Instruction::IntToPtr) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
        // Convert the integer value to the right size to ensure we get the
        // proper extension or truncation.
        Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                   IntPtrTy, false);
        Constant *Null = Constant::getNullValue(C->getType());
        return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
      }

      // Only do this transformation if the int is the same width as the
      // pointer; otherwise there is a truncation or extension that we aren't
      // modeling.
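      // e.g. (illustrative) icmp eq (i64 ptrtoint (i8* @g to i64)), 0
      // becomes icmp eq i8* @g, null when i64 matches the pointer width.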
      if (CE0->getOpcode() == Instruction::PtrToInt) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
        if (CE0->getType() == IntPtrTy) {
          Constant *C = CE0->getOperand(0);
          Constant *Null = Constant::getNullValue(C->getType());
          return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
        }
      }
    }

    if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
      if (CE0->getOpcode() == CE1->getOpcode()) {
        if (CE0->getOpcode() == Instruction::IntToPtr) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getType());

          // Convert the integer value to the right size to ensure we get the
          // proper extension or truncation.
          Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                      IntPtrTy, false);
          Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
                                                      IntPtrTy, false);
          return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI);
        }

        // Only do this transformation if the int is the same width as the
        // pointer; otherwise there is a truncation or extension that we
        // aren't modeling.
        if (CE0->getOpcode() == Instruction::PtrToInt) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
          if (CE0->getType() == IntPtrTy &&
              CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
            return ConstantFoldCompareInstOperands(
                Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
          }
        }
      }
    }

    // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
    // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
    if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
        CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
      Constant *LHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(0), Ops1, DL, TLI);
      Constant *RHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(1), Ops1, DL, TLI);
      unsigned OpC =
          Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
      Constant *Ops[] = { LHS, RHS };
      return ConstantFoldInstOperands(OpC, LHS->getType(), Ops, DL, TLI);
    }
  }

  return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
}


/// Given a constant and a getelementptr constantexpr, return the constant
/// value being addressed by the constant expression, or null if something is
/// funny and we can't decide.
Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
                                                       ConstantExpr *CE) {
  if (!CE->getOperand(1)->isNullValue())
    return nullptr;  // Do not allow stepping over the value!

  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) {
    C = C->getAggregateElement(CE->getOperand(i));
    if (!C)
      return nullptr;
  }
  return C;
}

/// Given a constant and getelementptr indices (with an *implied* zero pointer
/// index that is not in the list), return the constant value being addressed
/// by a virtual load, or null if something is funny and we can't decide.
Constant *llvm::ConstantFoldLoadThroughGEPIndices(Constant *C,
                                                  ArrayRef<Constant*> Indices) {
  // Loop over all of the operands, tracking down which value we are
  // addressing.
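  // e.g. (illustrative) with C = [2 x i32] [i32 7, i32 9] and Indices = {1},
  // this returns i32 9.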
  for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
    C = C->getAggregateElement(Indices[i]);
    if (!C)
      return nullptr;
  }
  return C;
}


//===----------------------------------------------------------------------===//
//  Constant Folding for Calls
//

/// Return true if it's even possible to fold a call to the specified function.
bool llvm::canConstantFoldCallTo(const Function *F) {
  switch (F->getIntrinsicID()) {
  case Intrinsic::fabs:
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::log:
  case Intrinsic::log2:
  case Intrinsic::log10:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::sqrt:
  case Intrinsic::pow:
  case Intrinsic::powi:
  case Intrinsic::bswap:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::copysign:
  case Intrinsic::round:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::convert_from_fp16:
  case Intrinsic::convert_to_fp16:
  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64:
    return true;
  default:
    return false;
  case 0: break;
  }

  if (!F->hasName())
    return false;
  StringRef Name = F->getName();

  // In these cases, the check of the length is required: we don't want to
  // return true for a name like "cos\0blah", which strcmp would consider
  // equal to "cos" but which has length 8.
  switch (Name[0]) {
  default: return false;
  case 'a':
    return Name == "acos" || Name == "asin" || Name == "atan" ||
           Name == "atan2";
  case 'c':
    return Name == "cos" || Name == "ceil" || Name == "cosf" || Name == "cosh";
  case 'e':
    return Name == "exp" || Name == "exp2";
  case 'f':
    return Name == "fabs" || Name == "fmod" || Name == "floor";
  case 'l':
    return Name == "log" || Name == "log10";
  case 'p':
    return Name == "pow";
  case 's':
    return Name == "sin" || Name == "sinh" || Name == "sqrt" ||
           Name == "sinf" || Name == "sqrtf";
  case 't':
    return Name == "tan" || Name == "tanh";
  }
}

static Constant *GetConstantFoldFPValue(double V, Type *Ty) {
  if (Ty->isHalfTy()) {
    APFloat APF(V);
    bool unused;
    APF.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &unused);
    return ConstantFP::get(Ty->getContext(), APF);
  }
  if (Ty->isFloatTy())
    return ConstantFP::get(Ty->getContext(), APFloat((float)V));
  if (Ty->isDoubleTy())
    return ConstantFP::get(Ty->getContext(), APFloat(V));
  llvm_unreachable("Can only constant fold half/float/double");
}

namespace {
/// Clear the floating-point exception state.
static inline void llvm_fenv_clearexcept() {
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
  feclearexcept(FE_ALL_EXCEPT);
#endif
  errno = 0;
}

/// Test if a floating-point exception was raised.
static inline bool llvm_fenv_testexcept() {
  int errno_val = errno;
  if (errno_val == ERANGE || errno_val == EDOM)
    return true;
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
  if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
    return true;
#endif
  return false;
}
} // End namespace

static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
                                Type *Ty) {
  llvm_fenv_clearexcept();
  V = NativeFP(V);
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue(V, Ty);
}

static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
                                      double V, double W, Type *Ty) {
  llvm_fenv_clearexcept();
  V = NativeFP(V, W);
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue(V, Ty);
}

/// Attempt to fold an SSE floating point to integer conversion of a constant
/// floating point. If roundTowardZero is false, the default IEEE rounding is
/// used (toward nearest, ties to even). This matches the behavior of the
/// non-truncating SSE instructions in the default rounding mode. The desired
/// integer type Ty is used to select how many bits are available for the
/// result. Returns null if the conversion cannot be performed, otherwise
/// returns the Constant value resulting from the conversion.
static Constant *ConstantFoldConvertToInt(const APFloat &Val,
                                          bool roundTowardZero, Type *Ty) {
  // All of these conversion intrinsics form an integer of at most 64 bits.
  unsigned ResultWidth = Ty->getIntegerBitWidth();
  assert(ResultWidth <= 64 &&
         "Can only constant fold conversions to 64 and 32 bit ints");

  uint64_t UIntVal;
  bool isExact = false;
  APFloat::roundingMode mode = roundTowardZero ? APFloat::rmTowardZero
                                               : APFloat::rmNearestTiesToEven;
  APFloat::opStatus status = Val.convertToInteger(&UIntVal, ResultWidth,
                                                  /*isSigned=*/true, mode,
                                                  &isExact);
  if (status != APFloat::opOK && status != APFloat::opInexact)
    return nullptr;
  return ConstantInt::get(Ty, UIntVal, /*isSigned=*/true);
}

static double getValueAsDouble(ConstantFP *Op) {
  Type *Ty = Op->getType();

  if (Ty->isFloatTy())
    return Op->getValueAPF().convertToFloat();

  if (Ty->isDoubleTy())
    return Op->getValueAPF().convertToDouble();

  bool unused;
  APFloat APF = Op->getValueAPF();
  APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &unused);
  return APF.convertToDouble();
}

static Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID,
                                        Type *Ty, ArrayRef<Constant *> Operands,
                                        const TargetLibraryInfo *TLI) {
  if (Operands.size() == 1) {
    if (ConstantFP *Op = dyn_cast<ConstantFP>(Operands[0])) {
      if (IntrinsicID == Intrinsic::convert_to_fp16) {
        APFloat Val(Op->getValueAPF());

        bool lost = false;
        Val.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &lost);

        return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
      }

      if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
        return nullptr;

      if (IntrinsicID == Intrinsic::round) {
        APFloat V = Op->getValueAPF();
        V.roundToIntegral(APFloat::rmNearestTiesToAway);
        return ConstantFP::get(Ty->getContext(), V);
      }

      /// We only fold functions with finite arguments. Folding NaN and inf is
      /// likely to be aborted with an exception anyway, and some host libms
      /// have known errors raising exceptions.
      if (Op->getValueAPF().isNaN() || Op->getValueAPF().isInfinity())
        return nullptr;

      /// Currently APFloat versions of these functions do not exist, so we
      /// use the host native double versions. Float versions are not called
      /// directly but for all these it is true (float)(f((double)arg)) ==
      /// f(arg). Long double not supported yet.
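      // e.g. (illustrative) @llvm.floor.f32(float 2.75) folds here via the
      // host's floor() to float 2.0.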
      double V = getValueAsDouble(Op);

      switch (IntrinsicID) {
      default: break;
      case Intrinsic::fabs:
        return ConstantFoldFP(fabs, V, Ty);
      case Intrinsic::log2:
        return ConstantFoldFP(log2, V, Ty);
      case Intrinsic::log:
        return ConstantFoldFP(log, V, Ty);
      case Intrinsic::log10:
        return ConstantFoldFP(log10, V, Ty);
      case Intrinsic::exp:
        return ConstantFoldFP(exp, V, Ty);
      case Intrinsic::exp2:
        return ConstantFoldFP(exp2, V, Ty);
      case Intrinsic::floor:
        return ConstantFoldFP(floor, V, Ty);
      case Intrinsic::ceil:
        return ConstantFoldFP(ceil, V, Ty);
      }

      if (!TLI)
        return nullptr;

      switch (Name[0]) {
      case 'a':
        if (Name == "acos" && TLI->has(LibFunc::acos))
          return ConstantFoldFP(acos, V, Ty);
        else if (Name == "asin" && TLI->has(LibFunc::asin))
          return ConstantFoldFP(asin, V, Ty);
        else if (Name == "atan" && TLI->has(LibFunc::atan))
          return ConstantFoldFP(atan, V, Ty);
        break;
      case 'c':
        if (Name == "ceil" && TLI->has(LibFunc::ceil))
          return ConstantFoldFP(ceil, V, Ty);
        else if (Name == "cos" && TLI->has(LibFunc::cos))
          return ConstantFoldFP(cos, V, Ty);
        else if (Name == "cosh" && TLI->has(LibFunc::cosh))
          return ConstantFoldFP(cosh, V, Ty);
        else if (Name == "cosf" && TLI->has(LibFunc::cosf))
          return ConstantFoldFP(cos, V, Ty);
        break;
      case 'e':
        if (Name == "exp" && TLI->has(LibFunc::exp))
          return ConstantFoldFP(exp, V, Ty);

        if (Name == "exp2" && TLI->has(LibFunc::exp2)) {
          // Constant fold exp2(x) as pow(2, x) in case the host doesn't have
          // a C99 library.
          return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
        }
        break;
      case 'f':
        if (Name == "fabs" && TLI->has(LibFunc::fabs))
          return ConstantFoldFP(fabs, V, Ty);
        else if (Name == "floor" && TLI->has(LibFunc::floor))
          return ConstantFoldFP(floor, V, Ty);
        break;
      case 'l':
        if (Name == "log" && V > 0 && TLI->has(LibFunc::log))
          return ConstantFoldFP(log, V, Ty);
        else if (Name == "log10" && V > 0 && TLI->has(LibFunc::log10))
          return ConstantFoldFP(log10, V, Ty);
        else if (IntrinsicID == Intrinsic::sqrt &&
                 (Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy())) {
          if (V >= -0.0)
            return ConstantFoldFP(sqrt, V, Ty);
          else {
            // Unlike the sqrt definitions in C/C++, POSIX, and IEEE-754 -
            // which all guarantee or favor returning NaN - the square root of
            // a negative number is not defined for the LLVM sqrt intrinsic.
            // This is because the intrinsic should only be emitted in place
            // of libm's sqrt function when using "no-nans-fp-math".
            return UndefValue::get(Ty);
          }
        }
        break;
      case 's':
        if (Name == "sin" && TLI->has(LibFunc::sin))
          return ConstantFoldFP(sin, V, Ty);
        else if (Name == "sinh" && TLI->has(LibFunc::sinh))
          return ConstantFoldFP(sinh, V, Ty);
        else if (Name == "sqrt" && V >= 0 && TLI->has(LibFunc::sqrt))
          return ConstantFoldFP(sqrt, V, Ty);
        else if (Name == "sqrtf" && V >= 0 && TLI->has(LibFunc::sqrtf))
          return ConstantFoldFP(sqrt, V, Ty);
        else if (Name == "sinf" && TLI->has(LibFunc::sinf))
          return ConstantFoldFP(sin, V, Ty);
        break;
      case 't':
        if (Name == "tan" && TLI->has(LibFunc::tan))
          return ConstantFoldFP(tan, V, Ty);
        else if (Name == "tanh" && TLI->has(LibFunc::tanh))
          return ConstantFoldFP(tanh, V, Ty);
        break;
      default:
        break;
      }
      return nullptr;
    }

    if (ConstantInt *Op = dyn_cast<ConstantInt>(Operands[0])) {
      switch (IntrinsicID) {
      case Intrinsic::bswap:
        return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
      case Intrinsic::ctpop:
        return ConstantInt::get(Ty, Op->getValue().countPopulation());
      case Intrinsic::convert_from_fp16: {
        APFloat Val(APFloat::IEEEhalf, Op->getValue());

        bool lost = false;
        APFloat::opStatus status =
          Val.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven,
                      &lost);

        // Conversion is always precise.
        (void)status;
        assert(status == APFloat::opOK && !lost &&
               "Precision lost during fp16 constfolding");

        return ConstantFP::get(Ty->getContext(), Val);
      }
      default:
        return nullptr;
      }
    }

    // Also accept a ConstantVector here, not just ConstantDataVector: a
    // vector constant that contains an undef lane is represented as a
    // ConstantVector.
    if (isa<ConstantVector>(Operands[0]) ||
        isa<ConstantDataVector>(Operands[0])) {
      Constant *Op = cast<Constant>(Operands[0]);
      switch (IntrinsicID) {
      default: break;
      case Intrinsic::x86_sse_cvtss2si:
      case Intrinsic::x86_sse_cvtss2si64:
      case Intrinsic::x86_sse2_cvtsd2si:
      case Intrinsic::x86_sse2_cvtsd2si64:
        if (ConstantFP *FPOp =
                dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
          return ConstantFoldConvertToInt(FPOp->getValueAPF(),
                                          /*roundTowardZero=*/false, Ty);
      case Intrinsic::x86_sse_cvttss2si:
      case Intrinsic::x86_sse_cvttss2si64:
      case Intrinsic::x86_sse2_cvttsd2si:
      case Intrinsic::x86_sse2_cvttsd2si64:
        if (ConstantFP *FPOp =
                dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
          return ConstantFoldConvertToInt(FPOp->getValueAPF(),
                                          /*roundTowardZero=*/true, Ty);
      }
    }

    if (isa<UndefValue>(Operands[0])) {
      if (IntrinsicID == Intrinsic::bswap)
        return Operands[0];
      return nullptr;
    }

    return nullptr;
  }

  if (Operands.size() == 2) {
    if (ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
      if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
        return nullptr;
      double Op1V = getValueAsDouble(Op1);

      if (ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
        if (Op2->getType() != Op1->getType())
          return nullptr;

        double Op2V = getValueAsDouble(Op2);
        if (IntrinsicID == Intrinsic::pow) {
          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
        }
        // e.g. copysign(3.0, -0.5) folds to -3.0: the magnitude of the first
        // operand combined with the sign bit of the second.
        if (IntrinsicID == Intrinsic::copysign) {
          APFloat V1 = Op1->getValueAPF();
          APFloat V2 = Op2->getValueAPF();
          V1.copySign(V2);
          return ConstantFP::get(Ty->getContext(), V1);
        }
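        // minnum/maxnum implement IEEE-754 minNum/maxNum semantics: if
        // exactly one operand is a NaN, the other (numeric) operand is
        // returned rather than the NaN.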
        if (IntrinsicID == Intrinsic::minnum) {
          const APFloat &C1 = Op1->getValueAPF();
          const APFloat &C2 = Op2->getValueAPF();
          return ConstantFP::get(Ty->getContext(), minnum(C1, C2));
        }

        if (IntrinsicID == Intrinsic::maxnum) {
          const APFloat &C1 = Op1->getValueAPF();
          const APFloat &C2 = Op2->getValueAPF();
          return ConstantFP::get(Ty->getContext(), maxnum(C1, C2));
        }

        if (!TLI)
          return nullptr;
        if (Name == "pow" && TLI->has(LibFunc::pow))
          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
        if (Name == "fmod" && TLI->has(LibFunc::fmod))
          return ConstantFoldBinaryFP(fmod, Op1V, Op2V, Ty);
        if (Name == "atan2" && TLI->has(LibFunc::atan2))
          return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
      } else if (ConstantInt *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
        // FIXME: The half variant below constructs an IEEEsingle APFloat, so
        // the folded constant has float type rather than the expected half.
        if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy())
          return ConstantFP::get(Ty->getContext(),
                                 APFloat((float)std::pow((float)Op1V,
                                     (int)Op2C->getZExtValue())));
        if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy())
          return ConstantFP::get(Ty->getContext(),
                                 APFloat((float)std::pow((float)Op1V,
                                     (int)Op2C->getZExtValue())));
        if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy())
          return ConstantFP::get(Ty->getContext(),
                                 APFloat((double)std::pow((double)Op1V,
                                     (int)Op2C->getZExtValue())));
      }
      return nullptr;
    }

    if (ConstantInt *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
      if (ConstantInt *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
        switch (IntrinsicID) {
        default: break;
        case Intrinsic::sadd_with_overflow:
        case Intrinsic::uadd_with_overflow:
        case Intrinsic::ssub_with_overflow:
        case Intrinsic::usub_with_overflow:
        case Intrinsic::smul_with_overflow:
        case Intrinsic::umul_with_overflow: {
          APInt Res;
          bool Overflow;
          switch (IntrinsicID) {
          default: llvm_unreachable("Invalid case");
          case Intrinsic::sadd_with_overflow:
            Res = Op1->getValue().sadd_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::uadd_with_overflow:
            Res = Op1->getValue().uadd_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::ssub_with_overflow:
            Res = Op1->getValue().ssub_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::usub_with_overflow:
            Res = Op1->getValue().usub_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::smul_with_overflow:
            Res = Op1->getValue().smul_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::umul_with_overflow:
            Res = Op1->getValue().umul_ov(Op2->getValue(), Overflow);
            break;
          }
          Constant *Ops[] = {
            ConstantInt::get(Ty->getContext(), Res),
            ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)
          };
          return ConstantStruct::get(cast<StructType>(Ty), Ops);
        }
        case Intrinsic::cttz:
          if (Op2->isOne() && Op1->isZero()) // cttz(0, 1) is undef.
            return UndefValue::get(Ty);
          return ConstantInt::get(Ty, Op1->getValue().countTrailingZeros());
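        // For both cttz and ctlz the second operand is the i1 is_zero_undef
        // flag: when it is 1, the result for a zero first operand is
        // undefined, which is what permits folding that case to undef.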
        case Intrinsic::ctlz:
          if (Op2->isOne() && Op1->isZero()) // ctlz(0, 1) is undef.
            return UndefValue::get(Ty);
          return ConstantInt::get(Ty, Op1->getValue().countLeadingZeros());
        }
      }

      return nullptr;
    }
    return nullptr;
  }

  if (Operands.size() != 3)
    return nullptr;

  if (const ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    if (const ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
      if (const ConstantFP *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
        switch (IntrinsicID) {
        default: break;
        case Intrinsic::fma:
        case Intrinsic::fmuladd: {
          APFloat V = Op1->getValueAPF();
          APFloat::opStatus s =
            V.fusedMultiplyAdd(Op2->getValueAPF(), Op3->getValueAPF(),
                               APFloat::rmNearestTiesToEven);
          if (s != APFloat::opInvalidOp)
            return ConstantFP::get(Ty->getContext(), V);

          return nullptr;
        }
        }
      }
    }
  }

  return nullptr;
}

static Constant *ConstantFoldVectorCall(StringRef Name, unsigned IntrinsicID,
                                        VectorType *VTy,
                                        ArrayRef<Constant *> Operands,
                                        const TargetLibraryInfo *TLI) {
  SmallVector<Constant *, 4> Result(VTy->getNumElements());
  SmallVector<Constant *, 4> Lane(Operands.size());
  Type *Ty = VTy->getElementType();

  for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
    // Gather a column of constants.
    for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
      Constant *Agg = Operands[J]->getAggregateElement(I);
      if (!Agg)
        return nullptr;

      Lane[J] = Agg;
    }

    // Use the regular scalar folding to simplify this column.
    Constant *Folded = ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane,
                                              TLI);
    if (!Folded)
      return nullptr;
    Result[I] = Folded;
  }

  return ConstantVector::get(Result);
}

/// Attempt to constant fold a call to the specified function
/// with the specified arguments, returning null if unsuccessful.
Constant *
llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
                       const TargetLibraryInfo *TLI) {
  if (!F->hasName())
    return nullptr;
  StringRef Name = F->getName();

  Type *Ty = F->getReturnType();

  if (VectorType *VTy = dyn_cast<VectorType>(Ty))
    return ConstantFoldVectorCall(Name, F->getIntrinsicID(), VTy, Operands,
                                  TLI);

  return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI);
}
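
// A minimal usage sketch of the entry point above: a pass visiting an
// instruction `I` with a `TargetLibraryInfo *TLI` in scope (both assumed
// here, not defined in this file) could fold a call whose arguments are all
// constant roughly as follows.
//
//   if (auto *CI = dyn_cast<CallInst>(&I))
//     if (Function *Callee = CI->getCalledFunction()) {
//       SmallVector<Constant *, 4> ConstArgs;
//       for (Value *Arg : CI->arg_operands())
//         if (auto *C = dyn_cast<Constant>(Arg))
//           ConstArgs.push_back(C);
//       // Only fold if every argument was constant.
//       if (ConstArgs.size() == CI->getNumArgOperands())
//         if (Constant *Folded = ConstantFoldCall(Callee, ConstArgs, TLI))
//           CI->replaceAllUsesWith(Folded);
//     }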