//===-- ConstantFolding.cpp - Fold instructions into constants -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines routines for folding instructions into constants.
//
// Also, to supplement the basic VMCore ConstantExpr simplifications,
// this file defines some additional folding routines that can make use of
// TargetData information. These functions cannot go in VMCore due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/FEnv.h"
#include <cerrno>
#include <cmath>
using namespace llvm;

//===----------------------------------------------------------------------===//
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//

/// FoldBitCast - Constant fold bitcast, symbolically evaluating it with
/// TargetData.  This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
static Constant *FoldBitCast(Constant *C, Type *DestTy,
                             const TargetData &TD) {
  // Catch the obvious splat cases.
  if (C->isNullValue() && !DestTy->isX86_MMXTy())
    return Constant::getNullValue(DestTy);
  if (C->isAllOnesValue() && !DestTy->isX86_MMXTy())
    return Constant::getAllOnesValue(DestTy);

  // Handle a vector->integer cast.
  if (IntegerType *IT = dyn_cast<IntegerType>(DestTy)) {
    ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(C);
    if (CDV == 0)
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned NumSrcElts = CDV->getType()->getNumElements();

    Type *SrcEltTy = CDV->getType()->getElementType();

    // If the vector is a vector of floating point, convert it to vector of int
    // to simplify things.
    if (SrcEltTy->isFloatingPointTy()) {
      unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
      Type *SrcIVTy =
        VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElts);
      // Ask VMCore to do the conversion now that #elts line up.
      C = ConstantExpr::getBitCast(C, SrcIVTy);
      CDV = cast<ConstantDataVector>(C);
    }

    // Now that we know that the input value is a vector of integers, just shift
    // and insert them into our result.
    unsigned BitShift = TD.getTypeAllocSizeInBits(SrcEltTy);
    APInt Result(IT->getBitWidth(), 0);
    for (unsigned i = 0; i != NumSrcElts; ++i) {
      Result <<= BitShift;
      if (TD.isLittleEndian())
        Result |= CDV->getElementAsInteger(NumSrcElts-i-1);
      else
        Result |= CDV->getElementAsInteger(i);
    }

    return ConstantInt::get(IT, Result);
  }

  // The code below only handles casts to vectors currently.
  VectorType *DestVTy = dyn_cast<VectorType>(DestTy);
  if (DestVTy == 0)
    return ConstantExpr::getBitCast(C, DestTy);

  // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
  // vector so the code below can handle it uniformly.
  if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
    Constant *Ops = C; // don't take the address of C!
    return FoldBitCast(ConstantVector::get(Ops), DestTy, TD);
  }

  // If this is a bitcast from constant vector -> vector, fold it.
  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
    return ConstantExpr::getBitCast(C, DestTy);

  // If the element types match, VMCore can fold it.
  unsigned NumDstElt = DestVTy->getNumElements();
  unsigned NumSrcElt = C->getType()->getVectorNumElements();
  if (NumDstElt == NumSrcElt)
    return ConstantExpr::getBitCast(C, DestTy);

  Type *SrcEltTy = C->getType()->getVectorElementType();
  Type *DstEltTy = DestVTy->getElementType();

  // Otherwise, we're changing the number of elements in a vector, which
  // requires endianness information to do the right thing.  For example,
  //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  // folds to (little endian):
  //    <4 x i32> <i32 0, i32 0, i32 1, i32 0>
  // and to (big endian):
  //    <4 x i32> <i32 0, i32 0, i32 0, i32 1>

  // First things first.  We only want to think about integers here, so if
  // we have something in FP form, recast it as integer.
  if (DstEltTy->isFloatingPointTy()) {
    // Fold to a vector of integers with the same size as our FP type.
    unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
    Type *DestIVTy =
      VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
    // Recursively handle this integer conversion, if possible.
    C = FoldBitCast(C, DestIVTy, TD);

    // Finally, VMCore can handle this now that #elts line up.
    return ConstantExpr::getBitCast(C, DestTy);
  }

  // Okay, we know the destination is integer; if the input is FP, convert
  // it to integer first.
  if (SrcEltTy->isFloatingPointTy()) {
    unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
    Type *SrcIVTy =
      VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
    // Ask VMCore to do the conversion now that #elts line up.
    C = ConstantExpr::getBitCast(C, SrcIVTy);
    // If VMCore wasn't able to fold it, bail out.
    if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
        !isa<ConstantDataVector>(C))
      return C;
  }

  // Now we know that the input and output vectors are both integer vectors
  // of the same size, and that their #elements is not the same.  Do the
  // conversion here, which depends on whether the input or output has
  // more elements.
  bool isLittleEndian = TD.isLittleEndian();

  SmallVector<Constant*, 32> Result;
  if (NumDstElt < NumSrcElt) {
    // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
    Constant *Zero = Constant::getNullValue(DstEltTy);
    unsigned Ratio = NumSrcElt/NumDstElt;
    unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
    unsigned SrcElt = 0;
    for (unsigned i = 0; i != NumDstElt; ++i) {
      // Build each element of the result.
      Constant *Elt = Zero;
      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src = dyn_cast<ConstantInt>(C->getAggregateElement(SrcElt++));
        if (!Src)  // Reject constantexpr elements.
          return ConstantExpr::getBitCast(C, DestTy);

        // Zero extend the element to the right size.
        Src = ConstantExpr::getZExt(Src, Elt->getType());

        // Shift it to the right place, depending on endianness.
        Src = ConstantExpr::getShl(Src,
                                   ConstantInt::get(Src->getType(), ShiftAmt));
        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;

        // Mix it in.
        Elt = ConstantExpr::getOr(Elt, Src);
      }
      Result.push_back(Elt);
    }
    return ConstantVector::get(Result);
  }

  // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  unsigned Ratio = NumDstElt/NumSrcElt;
  unsigned DstBitSize = DstEltTy->getPrimitiveSizeInBits();

  // Loop over each source value, expanding into multiple results.
  for (unsigned i = 0; i != NumSrcElt; ++i) {
    Constant *Src = dyn_cast<ConstantInt>(C->getAggregateElement(i));
    if (!Src)  // Reject constantexpr elements.
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
    for (unsigned j = 0; j != Ratio; ++j) {
      // Shift the piece of the value into the right place, depending on
      // endianness.
      Constant *Elt = ConstantExpr::getLShr(Src,
                                  ConstantInt::get(Src->getType(), ShiftAmt));
      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;

      // Truncate and remember this piece.
      Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
    }
  }

  return ConstantVector::get(Result);
}


/// IsConstantOffsetFromGlobal - If this constant is actually a constant offset
/// from a global, return the global and the constant.  Because of
/// constantexprs, this function is recursive.
static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
                                       int64_t &Offset, const TargetData &TD) {
  // Trivial case, constant is the global.
  if ((GV = dyn_cast<GlobalValue>(C))) {
    Offset = 0;
    return true;
  }

  // Otherwise, if this isn't a constant expr, bail out.
  ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
  if (!CE) return false;

  // Look through ptr->int and ptr->ptr casts.
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::BitCast)
    return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD);

  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
  if (CE->getOpcode() == Instruction::GetElementPtr) {
    // Cannot compute this if the element type of the pointer is missing size
    // info.
    if (!cast<PointerType>(CE->getOperand(0)->getType())
          ->getElementType()->isSized())
      return false;

    // If the base isn't a global+constant, we aren't either.
    if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD))
      return false;

    // Otherwise, add any offset that our operands provide.
    gep_type_iterator GTI = gep_type_begin(CE);
    for (User::const_op_iterator i = CE->op_begin() + 1, e = CE->op_end();
         i != e; ++i, ++GTI) {
      ConstantInt *CI = dyn_cast<ConstantInt>(*i);
      if (!CI) return false;  // Index isn't a simple constant?
      if (CI->isZero()) continue;  // Not adding anything.

      if (StructType *ST = dyn_cast<StructType>(*GTI)) {
        // N = N + Offset
        Offset += TD.getStructLayout(ST)->getElementOffset(CI->getZExtValue());
      } else {
        SequentialType *SQT = cast<SequentialType>(*GTI);
        Offset += TD.getTypeAllocSize(SQT->getElementType())*CI->getSExtValue();
      }
    }
    return true;
  }

  return false;
}

/// ReadDataFromGlobal - Recursive helper to read bits out of global.  C is the
/// constant being copied out of.  ByteOffset is an offset into C.  CurPtr is
/// the pointer to copy results into and BytesLeft is the number of bytes left
/// in the CurPtr buffer.  TD is the target data.
static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
                               unsigned char *CurPtr, unsigned BytesLeft,
                               const TargetData &TD) {
  assert(ByteOffset <= TD.getTypeAllocSize(C->getType()) &&
         "Out of range access");

  // If this element is zero or undefined, we can just return since *CurPtr is
  // zero initialized.
  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
    return true;

  if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
    if (CI->getBitWidth() > 64 ||
        (CI->getBitWidth() & 7) != 0)
      return false;

    uint64_t Val = CI->getZExtValue();
    unsigned IntBytes = unsigned(CI->getBitWidth()/8);

    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
      CurPtr[i] = (unsigned char)(Val >> (ByteOffset * 8));
      ++ByteOffset;
    }
    return true;
  }

  if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->getType()->isDoubleTy()) {
      C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), TD);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, TD);
    }
    if (CFP->getType()->isFloatTy()) {
      C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), TD);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, TD);
    }
    return false;
  }

  if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C)) {
    const StructLayout *SL = TD.getStructLayout(CS->getType());
    unsigned Index = SL->getElementContainingOffset(ByteOffset);
    uint64_t CurEltOffset = SL->getElementOffset(Index);
    ByteOffset -= CurEltOffset;

    while (1) {
      // If the element access is to the element itself and not to tail
      // padding, read the bytes from the element.
      uint64_t EltSize = TD.getTypeAllocSize(CS->getOperand(Index)->getType());

      if (ByteOffset < EltSize &&
          !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
                              BytesLeft, TD))
        return false;

      ++Index;

      // Check to see if we read from the last struct element; if so we're done.
      if (Index == CS->getType()->getNumElements())
        return true;

      // If we read all of the bytes we needed from this element we're done.
      uint64_t NextEltOffset = SL->getElementOffset(Index);

      if (BytesLeft <= NextEltOffset-CurEltOffset-ByteOffset)
        return true;

      // Move to the next element of the struct.
      CurPtr += NextEltOffset-CurEltOffset-ByteOffset;
      BytesLeft -= NextEltOffset-CurEltOffset-ByteOffset;
      ByteOffset = 0;
      CurEltOffset = NextEltOffset;
    }
    // not reached.
  }

  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
      isa<ConstantDataSequential>(C)) {
    Type *EltTy = cast<SequentialType>(C->getType())->getElementType();
    uint64_t EltSize = TD.getTypeAllocSize(EltTy);
    uint64_t Index = ByteOffset / EltSize;
    uint64_t Offset = ByteOffset - Index * EltSize;
    uint64_t NumElts;
    if (ArrayType *AT = dyn_cast<ArrayType>(C->getType()))
      NumElts = AT->getNumElements();
    else
      NumElts = cast<VectorType>(C->getType())->getNumElements();

    for (; Index != NumElts; ++Index) {
      if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
                              BytesLeft, TD))
        return false;

      uint64_t BytesWritten = EltSize - Offset;
      assert(BytesWritten <= EltSize && "Not indexing into this element?");
      if (BytesWritten >= BytesLeft)
        return true;

      Offset = 0;
      BytesLeft -= BytesWritten;
      CurPtr += BytesWritten;
    }
    return true;
  }

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->getOpcode() == Instruction::IntToPtr &&
        CE->getOperand(0)->getType() == TD.getIntPtrType(CE->getContext()))
      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
                                BytesLeft, TD);
  }

  // Otherwise, unknown initializer type.
  return false;
}

static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
                                                 const TargetData &TD) {
  Type *LoadTy = cast<PointerType>(C->getType())->getElementType();
  IntegerType *IntType = dyn_cast<IntegerType>(LoadTy);

  // If this isn't an integer load we can't fold it directly.
  if (!IntType) {
    // If this is a float/double load, we can try folding it as an int32/64
    // load and then bitcast the result.  This can be useful for union cases.
    // Note that address spaces don't matter here since we're not going to
    // result in an actual new load.
    Type *MapTy;
    if (LoadTy->isFloatTy())
      MapTy = Type::getInt32PtrTy(C->getContext());
    else if (LoadTy->isDoubleTy())
      MapTy = Type::getInt64PtrTy(C->getContext());
    else if (LoadTy->isVectorTy()) {
      MapTy = IntegerType::get(C->getContext(),
                               TD.getTypeAllocSizeInBits(LoadTy));
      MapTy = PointerType::getUnqual(MapTy);
    } else
      return 0;

    C = FoldBitCast(C, MapTy, TD);
    if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, TD))
      return FoldBitCast(Res, LoadTy, TD);
    return 0;
  }

  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
  if (BytesLoaded > 32 || BytesLoaded == 0) return 0;

  GlobalValue *GVal;
  int64_t Offset;
  if (!IsConstantOffsetFromGlobal(C, GVal, Offset, TD))
    return 0;

  GlobalVariable *GV = dyn_cast<GlobalVariable>(GVal);
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      !GV->getInitializer()->getType()->isSized())
    return 0;

  // If we're loading off the beginning of the global, some bytes may be valid,
  // but we don't try to handle this.
  if (Offset < 0) return 0;

  // If we're not accessing anything in this constant, the result is undefined.
  if (uint64_t(Offset) >= TD.getTypeAllocSize(GV->getInitializer()->getType()))
    return UndefValue::get(IntType);

  unsigned char RawBytes[32] = {0};
  if (!ReadDataFromGlobal(GV->getInitializer(), Offset, RawBytes,
                          BytesLoaded, TD))
    return 0;

  APInt ResultVal = APInt(IntType->getBitWidth(), RawBytes[BytesLoaded-1]);
  for (unsigned i = 1; i != BytesLoaded; ++i) {
    ResultVal <<= 8;
    ResultVal |= RawBytes[BytesLoaded-1-i];
  }

  return ConstantInt::get(IntType->getContext(), ResultVal);
}

/// ConstantFoldLoadFromConstPtr - Return the value that a load from C would
/// produce if it is constant and determinable.  If this is not determinable,
/// return null.
Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
                                             const TargetData *TD) {
  // First, try the easy cases:
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      return GV->getInitializer();

  // If the loaded value isn't a constant expr, we can't handle it.
  ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
  if (!CE) return 0;

  if (CE->getOpcode() == Instruction::GetElementPtr) {
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(CE->getOperand(0)))
      if (GV->isConstant() && GV->hasDefinitiveInitializer())
        if (Constant *V =
              ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE))
          return V;
  }

  // Instead of loading a constant C string, use the corresponding integer
  // value directly if the string length is small enough.
  StringRef Str;
  if (TD && getConstantStringInfo(CE, Str) && !Str.empty()) {
    unsigned StrLen = Str.size();
    Type *Ty = cast<PointerType>(CE->getType())->getElementType();
    unsigned NumBits = Ty->getPrimitiveSizeInBits();
    // Replace the load with an immediate integer if the result is an integer
    // or fp value.
    if ((NumBits >> 3) == StrLen + 1 && (NumBits & 7) == 0 &&
        (isa<IntegerType>(Ty) || Ty->isFloatingPointTy())) {
      APInt StrVal(NumBits, 0);
      APInt SingleChar(NumBits, 0);
      if (TD->isLittleEndian()) {
        for (signed i = StrLen-1; i >= 0; i--) {
          SingleChar = (uint64_t) Str[i] & UCHAR_MAX;
          StrVal = (StrVal << 8) | SingleChar;
        }
      } else {
        for (unsigned i = 0; i < StrLen; i++) {
          SingleChar = (uint64_t) Str[i] & UCHAR_MAX;
          StrVal = (StrVal << 8) | SingleChar;
        }
        // Append NUL at the end.
        SingleChar = 0;
        StrVal = (StrVal << 8) | SingleChar;
      }

      Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
      if (Ty->isFloatingPointTy())
        Res = ConstantExpr::getBitCast(Res, Ty);
      return Res;
    }
  }

  // If this load comes from anywhere in a constant global, and if the global
  // is all undef or zero, we know what it loads.
  if (GlobalVariable *GV =
        dyn_cast<GlobalVariable>(GetUnderlyingObject(CE, TD))) {
    if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
      Type *ResTy = cast<PointerType>(C->getType())->getElementType();
      if (GV->getInitializer()->isNullValue())
        return Constant::getNullValue(ResTy);
      if (isa<UndefValue>(GV->getInitializer()))
        return UndefValue::get(ResTy);
    }
  }

  // Try hard to fold loads from bitcasted strange and non-type-safe things.
  // We currently don't do any of this for big endian systems.  It can be
  // generalized in the future if someone is interested.
  if (TD && TD->isLittleEndian())
    return FoldReinterpretLoadFromConstPtr(CE, *TD);
  return 0;
}

static Constant *ConstantFoldLoadInst(const LoadInst *LI, const TargetData *TD){
  if (LI->isVolatile()) return 0;

  if (Constant *C = dyn_cast<Constant>(LI->getOperand(0)))
    return ConstantFoldLoadFromConstPtr(C, TD);

  return 0;
}

/// SymbolicallyEvaluateBinop - One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
/// these together.  If target data info is available, it is provided as TD,
/// otherwise TD is null.
static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
                                           Constant *Op1, const TargetData *TD){
  // SROA

  // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
  // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
  // bits.


  // If the constant expr is something like &A[123] - &A[4].f, fold this into a
  // constant.  This happens frequently when iterating over a global array.
  if (Opc == Instruction::Sub && TD) {
    GlobalValue *GV1, *GV2;
    int64_t Offs1, Offs2;

    if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, *TD))
      if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, *TD) &&
          GV1 == GV2) {
        // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
        return ConstantInt::get(Op0->getType(), Offs1-Offs2);
      }
  }

  return 0;
}

/// CastGEPIndices - If array indices are not pointer-sized integers,
/// explicitly cast them so that they aren't implicitly casted by the
/// getelementptr.
static Constant *CastGEPIndices(ArrayRef<Constant *> Ops,
                                Type *ResultTy, const TargetData *TD,
                                const TargetLibraryInfo *TLI) {
  if (!TD) return 0;
  Type *IntPtrTy = TD->getIntPtrType(ResultTy->getContext());

  bool Any = false;
  SmallVector<Constant*, 32> NewIdxs;
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    if ((i == 1 ||
         !isa<StructType>(GetElementPtrInst::getIndexedType(Ops[0]->getType(),
                                                        Ops.slice(1, i-1)))) &&
        Ops[i]->getType() != IntPtrTy) {
      Any = true;
      NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
                                                                      true,
                                                                      IntPtrTy,
                                                                      true),
                                              Ops[i], IntPtrTy));
    } else
      NewIdxs.push_back(Ops[i]);
  }
  if (!Any) return 0;

  Constant *C =
    ConstantExpr::getGetElementPtr(Ops[0], NewIdxs);
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
    if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
      C = Folded;
  return C;
}

/// Strip the pointer casts, but preserve the address space information.
static Constant* StripPtrCastKeepAS(Constant* Ptr) {
  assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
  PointerType *OldPtrTy = cast<PointerType>(Ptr->getType());
  Ptr = cast<Constant>(Ptr->stripPointerCasts());
  PointerType *NewPtrTy = cast<PointerType>(Ptr->getType());

  // Preserve the address space number of the pointer.
  if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) {
    NewPtrTy = NewPtrTy->getElementType()->getPointerTo(
      OldPtrTy->getAddressSpace());
    Ptr = ConstantExpr::getBitCast(Ptr, NewPtrTy);
  }
  return Ptr;
}

/// SymbolicallyEvaluateGEP - If we can symbolically evaluate the specified GEP
/// constant expression, do so.
static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
                                         Type *ResultTy, const TargetData *TD,
                                         const TargetLibraryInfo *TLI) {
  Constant *Ptr = Ops[0];
  if (!TD || !cast<PointerType>(Ptr->getType())->getElementType()->isSized() ||
      !Ptr->getType()->isPointerTy())
    return 0;

  Type *IntPtrTy = TD->getIntPtrType(Ptr->getContext());

  // If this is a constant expr gep that is effectively computing an
  // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'.
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    if (!isa<ConstantInt>(Ops[i])) {

      // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
      // "inttoptr (sub (ptrtoint Ptr), V)"
      if (Ops.size() == 2 &&
          cast<PointerType>(ResultTy)->getElementType()->isIntegerTy(8)) {
        ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[1]);
        assert((CE == 0 || CE->getType() == IntPtrTy) &&
               "CastGEPIndices didn't canonicalize index types!");
        if (CE && CE->getOpcode() == Instruction::Sub &&
            CE->getOperand(0)->isNullValue()) {
          Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
          Res = ConstantExpr::getSub(Res, CE->getOperand(1));
          Res = ConstantExpr::getIntToPtr(Res, ResultTy);
          if (ConstantExpr *ResCE = dyn_cast<ConstantExpr>(Res))
            Res = ConstantFoldConstantExpression(ResCE, TD, TLI);
          return Res;
        }
      }
      return 0;
    }

  unsigned BitWidth = TD->getTypeSizeInBits(IntPtrTy);
  APInt Offset =
    APInt(BitWidth, TD->getIndexedOffset(Ptr->getType(),
                                         makeArrayRef((Value *const*)
                                                        Ops.data() + 1,
                                                      Ops.size() - 1)));
  Ptr = StripPtrCastKeepAS(Ptr);

  // If this is a GEP of a GEP, fold it all into a single GEP.
  while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
    SmallVector<Value *, 4> NestedOps(GEP->op_begin()+1, GEP->op_end());

    // Do not try to incorporate the sub-GEP if some index is not a number.
    bool AllConstantInt = true;
    for (unsigned i = 0, e = NestedOps.size(); i != e; ++i)
      if (!isa<ConstantInt>(NestedOps[i])) {
        AllConstantInt = false;
        break;
      }
    if (!AllConstantInt)
      break;

    Ptr = cast<Constant>(GEP->getOperand(0));
    Offset += APInt(BitWidth,
                    TD->getIndexedOffset(Ptr->getType(), NestedOps));
    Ptr = StripPtrCastKeepAS(Ptr);
  }

  // If the base value for this address is a literal integer value, fold the
  // getelementptr to the resulting integer value casted to the pointer type.
  APInt BasePtr(BitWidth, 0);
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
    if (CE->getOpcode() == Instruction::IntToPtr)
      if (ConstantInt *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
        BasePtr = Base->getValue().zextOrTrunc(BitWidth);
  if (Ptr->isNullValue() || BasePtr != 0) {
    Constant *C = ConstantInt::get(Ptr->getContext(), Offset+BasePtr);
    return ConstantExpr::getIntToPtr(C, ResultTy);
  }

  // Otherwise form a regular getelementptr.  Recompute the indices so that
  // we eliminate over-indexing of the notional static type array bounds.
  // This makes it easy to determine if the getelementptr is "inbounds".
  // Also, this helps GlobalOpt do SROA on GlobalVariables.
  Type *Ty = Ptr->getType();
  assert(Ty->isPointerTy() && "Forming regular GEP of non-pointer type");
  SmallVector<Constant*, 32> NewIdxs;
  do {
    if (SequentialType *ATy = dyn_cast<SequentialType>(Ty)) {
      if (ATy->isPointerTy()) {
        // The only pointer indexing we'll do is on the first index of the GEP.
        if (!NewIdxs.empty())
          break;

        // Only handle pointers to sized types, not pointers to functions.
        if (!ATy->getElementType()->isSized())
          return 0;
      }

      // Determine which element of the array the offset points into.
      APInt ElemSize(BitWidth, TD->getTypeAllocSize(ATy->getElementType()));
      IntegerType *IntPtrTy = TD->getIntPtrType(Ty->getContext());
      if (ElemSize == 0)
        // The element size is 0.  This may be [0 x Ty]*, so just use a zero
        // index for this level and proceed to the next level to see if it can
        // accommodate the offset.
        NewIdxs.push_back(ConstantInt::get(IntPtrTy, 0));
      else {
        // The element size is non-zero; divide the offset by the element
        // size (rounding down), to compute the index at this level.
        APInt NewIdx = Offset.udiv(ElemSize);
        Offset -= NewIdx * ElemSize;
        NewIdxs.push_back(ConstantInt::get(IntPtrTy, NewIdx));
      }
      Ty = ATy->getElementType();
    } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
      // If we end up with an offset that isn't valid for this struct type, we
      // can't re-form this GEP in a regular form, so bail out.  The pointer
      // operand likely went through casts that are necessary to make the GEP
      // sensible.
      const StructLayout &SL = *TD->getStructLayout(STy);
      if (Offset.uge(SL.getSizeInBytes()))
        break;

      // Determine which field of the struct the offset points into.  The
      // getZExtValue is fine as we've already ensured that the offset is
      // within the range representable by the StructLayout API.
      unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue());
      NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
                                         ElIdx));
      Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx));
      Ty = STy->getTypeAtIndex(ElIdx);
    } else {
      // We've reached some non-indexable type.
      break;
    }
  } while (Ty != cast<PointerType>(ResultTy)->getElementType());

  // If we haven't used up the entire offset by descending the static
  // type, then the offset is pointing into the middle of an indivisible
  // member, so we can't simplify it.
  if (Offset != 0)
    return 0;

  // Create a GEP.
  Constant *C =
    ConstantExpr::getGetElementPtr(Ptr, NewIdxs);
  assert(cast<PointerType>(C->getType())->getElementType() == Ty &&
         "Computed GetElementPtr has unexpected type!");

  // If we ended up indexing a member with a type that doesn't match
  // the type of what the original indices indexed, add a cast.
  if (Ty != cast<PointerType>(ResultTy)->getElementType())
    C = FoldBitCast(C, ResultTy, *TD);

  return C;
}



//===----------------------------------------------------------------------===//
// Constant Folding public APIs
//===----------------------------------------------------------------------===//

/// ConstantFoldInstruction - Try to constant fold the specified instruction.
/// If successful, the constant result is returned; if not, null is returned.
/// Note that this fails if not all of the operands are constant.  Otherwise,
/// this function can only fail when attempting to fold instructions like loads
/// and stores, which have no constant expression form.
Constant *llvm::ConstantFoldInstruction(Instruction *I,
                                        const TargetData *TD,
                                        const TargetLibraryInfo *TLI) {
  // Handle PHI nodes quickly here...
  if (PHINode *PN = dyn_cast<PHINode>(I)) {
    Constant *CommonValue = 0;

    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *Incoming = PN->getIncomingValue(i);
      // If the incoming value is undef then skip it.  Note that while we could
      // skip the value if it is equal to the phi node itself we choose not to
      // because that would break the rule that constant folding only applies
      // if all operands are constants.
      if (isa<UndefValue>(Incoming))
        continue;
      // If the incoming value is not a constant, then give up.
      Constant *C = dyn_cast<Constant>(Incoming);
      if (!C)
        return 0;
      // Fold the PHI's operands.
      if (ConstantExpr *NewC = dyn_cast<ConstantExpr>(C))
        C = ConstantFoldConstantExpression(NewC, TD, TLI);
      // If the incoming value is a different constant to
      // the one we saw previously, then give up.
      if (CommonValue && C != CommonValue)
        return 0;
      CommonValue = C;
    }

    // If we reach here, all incoming values are the same constant or undef.
    return CommonValue ? CommonValue : UndefValue::get(PN->getType());
  }

  // Scan the operand list, checking to see if they are all constants; if so,
  // hand off to ConstantFoldInstOperands.
  SmallVector<Constant*, 8> Ops;
  for (User::op_iterator i = I->op_begin(), e = I->op_end(); i != e; ++i) {
    Constant *Op = dyn_cast<Constant>(*i);
    if (!Op)
      return 0;  // All operands not constant!

    // Fold the Instruction's operands.
    if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(Op))
      Op = ConstantFoldConstantExpression(NewCE, TD, TLI);

    Ops.push_back(Op);
  }

  if (const CmpInst *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
                                           TD, TLI);

  if (const LoadInst *LI = dyn_cast<LoadInst>(I))
    return ConstantFoldLoadInst(LI, TD);

  if (InsertValueInst *IVI = dyn_cast<InsertValueInst>(I))
    return ConstantExpr::getInsertValue(
                                cast<Constant>(IVI->getAggregateOperand()),
                                cast<Constant>(IVI->getInsertedValueOperand()),
                                IVI->getIndices());

  if (ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I))
    return ConstantExpr::getExtractValue(
                                    cast<Constant>(EVI->getAggregateOperand()),
                                    EVI->getIndices());

  return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Ops, TD, TLI);
}

/// ConstantFoldConstantExpression - Attempt to fold the constant expression
/// using the specified TargetData.  If successful, the constant result is
/// returned; if not, null is returned.
Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE,
                                               const TargetData *TD,
                                               const TargetLibraryInfo *TLI) {
  SmallVector<Constant*, 8> Ops;
  for (User::const_op_iterator i = CE->op_begin(), e = CE->op_end();
       i != e; ++i) {
    Constant *NewC = cast<Constant>(*i);
    // Recursively fold the ConstantExpr's operands.
    if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(NewC))
      NewC = ConstantFoldConstantExpression(NewCE, TD, TLI);
    Ops.push_back(NewC);
  }

  if (CE->isCompare())
    return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1],
                                           TD, TLI);
  return ConstantFoldInstOperands(CE->getOpcode(), CE->getType(), Ops, TD, TLI);
}

/// ConstantFoldInstOperands - Attempt to constant fold an instruction with the
/// specified opcode and operands.
/// If successful, the constant result is returned; if not, null is returned.
/// Note that this function can fail when attempting to fold instructions like
/// loads and stores, which have no constant expression form.
///
/// TODO: This function neither utilizes nor preserves nsw/nuw/inbounds/etc
/// information, due to only being passed an opcode and operands.  Constant
/// folding using this function strips this information.
///
Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
                                         ArrayRef<Constant *> Ops,
                                         const TargetData *TD,
                                         const TargetLibraryInfo *TLI) {
  // Handle easy binops first.
  if (Instruction::isBinaryOp(Opcode)) {
    if (isa<ConstantExpr>(Ops[0]) || isa<ConstantExpr>(Ops[1]))
      if (Constant *C = SymbolicallyEvaluateBinop(Opcode, Ops[0], Ops[1], TD))
        return C;

    return ConstantExpr::get(Opcode, Ops[0], Ops[1]);
  }

  switch (Opcode) {
  default: return 0;
  case Instruction::ICmp:
  case Instruction::FCmp: llvm_unreachable("Invalid for compares");
  case Instruction::Call:
    if (Function *F = dyn_cast<Function>(Ops.back()))
      if (canConstantFoldCallTo(F))
        return ConstantFoldCall(F, Ops.slice(0, Ops.size() - 1), TLI);
    return 0;
  case Instruction::PtrToInt:
    // If the input is an inttoptr, eliminate the pair.  This requires knowing
    // the width of a pointer, so it can't be done in ConstantExpr::getCast.
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0])) {
      if (TD && CE->getOpcode() == Instruction::IntToPtr) {
        Constant *Input = CE->getOperand(0);
        unsigned InWidth = Input->getType()->getScalarSizeInBits();
        if (TD->getPointerSizeInBits() < InWidth) {
          Constant *Mask =
            ConstantInt::get(CE->getContext(), APInt::getLowBitsSet(InWidth,
                                                  TD->getPointerSizeInBits()));
          Input = ConstantExpr::getAnd(Input, Mask);
        }
        // Do a zext or trunc to get to the dest size.
        return ConstantExpr::getIntegerCast(Input, DestTy, false);
      }
    }
    return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
  case Instruction::IntToPtr:
    // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
    // the int size is >= the ptr size.  This requires knowing the width of a
    // pointer, so it can't be done in ConstantExpr::getCast.
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0]))
      if (TD &&
          TD->getPointerSizeInBits() <= CE->getType()->getScalarSizeInBits() &&
          CE->getOpcode() == Instruction::PtrToInt)
        return FoldBitCast(CE->getOperand(0), DestTy, *TD);

    return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
    return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
  case Instruction::BitCast:
    if (TD)
      return FoldBitCast(Ops[0], DestTy, *TD);
    return ConstantExpr::getBitCast(Ops[0], DestTy);
  case Instruction::Select:
    return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]);
  case Instruction::ExtractElement:
    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
  case Instruction::InsertElement:
    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]);
  case Instruction::GetElementPtr:
    if (Constant *C = CastGEPIndices(Ops, DestTy, TD, TLI))
      return C;
    if (Constant *C = SymbolicallyEvaluateGEP(Ops, DestTy, TD, TLI))
      return C;

    return ConstantExpr::getGetElementPtr(Ops[0], Ops.slice(1));
  }
}

/// ConstantFoldCompareInstOperands - Attempt to constant fold a compare
/// instruction (icmp/fcmp) with the specified operands.  If it fails, it
/// returns a constant expression of the specified operands.
///
Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
                                                Constant *Ops0, Constant *Ops1,
                                                const TargetData *TD,
                                                const TargetLibraryInfo *TLI) {
  // fold: icmp (inttoptr x), null         -> icmp x, 0
  // fold: icmp (ptrtoint x), 0            -> icmp x, null
  // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
  // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
  //
  // ConstantExpr::getCompare cannot do this, because it doesn't have TD
  // around to know if bit truncation is happening.
  if (ConstantExpr *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
    if (TD && Ops1->isNullValue()) {
      Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());
      if (CE0->getOpcode() == Instruction::IntToPtr) {
        // Convert the integer value to the right size to ensure we get the
        // proper extension or truncation.
        Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                   IntPtrTy, false);
        Constant *Null = Constant::getNullValue(C->getType());
        return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
      }

      // Only do this transformation if the int is intptrty in size, otherwise
      // there is a truncation or extension that we aren't modeling.
      if (CE0->getOpcode() == Instruction::PtrToInt &&
          CE0->getType() == IntPtrTy) {
        Constant *C = CE0->getOperand(0);
        Constant *Null = Constant::getNullValue(C->getType());
        return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
      }
    }

    if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
      if (TD && CE0->getOpcode() == CE1->getOpcode()) {
        Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());

        if (CE0->getOpcode() == Instruction::IntToPtr) {
          // Convert the integer value to the right size to ensure we get the
          // proper extension or truncation.
          Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                      IntPtrTy, false);
          Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
                                                      IntPtrTy, false);
          return ConstantFoldCompareInstOperands(Predicate, C0, C1, TD, TLI);
        }

        // Only do this transformation if the int is intptrty in size,
        // otherwise there is a truncation or extension that we aren't
        // modeling.
        if ((CE0->getOpcode() == Instruction::PtrToInt &&
             CE0->getType() == IntPtrTy &&
             CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()))
          return ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0),
                                                 CE1->getOperand(0), TD, TLI);
      }
    }

    // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
    // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
    if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
        CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
      Constant *LHS =
        ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0), Ops1,
                                        TD, TLI);
      Constant *RHS =
        ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(1), Ops1,
                                        TD, TLI);
      unsigned OpC =
        Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
      Constant *Ops[] = { LHS, RHS };
      return ConstantFoldInstOperands(OpC, LHS->getType(), Ops, TD, TLI);
    }
  }

  return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
}


/// ConstantFoldLoadThroughGEPConstantExpr - Given a constant and a
/// getelementptr constantexpr, return the constant value being addressed by
/// the constant expression, or null if something is funny and we can't decide.
Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
                                                       ConstantExpr *CE) {
  if (!CE->getOperand(1)->isNullValue())
    return 0;  // Do not allow stepping over the value!

  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) {
    C = C->getAggregateElement(CE->getOperand(i));
    if (C == 0) return 0;
  }
  return C;
}

/// ConstantFoldLoadThroughGEPIndices - Given a constant and getelementptr
/// indices (with an *implied* zero pointer index that is not in the list),
/// return the constant value being addressed by a virtual load, or null if
/// something is funny and we can't decide.
Constant *llvm::ConstantFoldLoadThroughGEPIndices(Constant *C,
                                                  ArrayRef<Constant*> Indices) {
  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
    C = C->getAggregateElement(Indices[i]);
    if (C == 0) return 0;
  }
  return C;
}


//===----------------------------------------------------------------------===//
//  Constant Folding for Calls
//

/// canConstantFoldCallTo - Return true if it is even possible to fold a call
/// to the specified function.
bool
llvm::canConstantFoldCallTo(const Function *F) {
  switch (F->getIntrinsicID()) {
  case Intrinsic::sqrt:
  case Intrinsic::pow:
  case Intrinsic::powi:
  case Intrinsic::bswap:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::convert_from_fp16:
  case Intrinsic::convert_to_fp16:
  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64:
    return true;
  default:
    return false;
  case 0: break;
  }

  if (!F->hasName()) return false;
  StringRef Name = F->getName();

  // In these cases, the check of the length is required.  We don't want to
  // return true for a name like "cos\0blah", which strcmp would consider
  // equal to "cos" even though it has length 8.
  switch (Name[0]) {
  default: return false;
  case 'a':
    return Name == "acos" || Name == "asin" ||
      Name == "atan" || Name == "atan2";
  case 'c':
    return Name == "cos" || Name == "ceil" || Name == "cosf" || Name == "cosh";
  case 'e':
    return Name == "exp" || Name == "exp2";
  case 'f':
    return Name == "fabs" || Name == "fmod" || Name == "floor";
  case 'l':
    return Name == "log" || Name == "log10";
  case 'p':
    return Name == "pow";
  case 's':
    return Name == "sin" || Name == "sinh" || Name == "sqrt" ||
      Name == "sinf" || Name == "sqrtf";
  case 't':
    return Name == "tan" || Name == "tanh";
  }
}

static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
                                Type *Ty) {
  sys::llvm_fenv_clearexcept();
  V = NativeFP(V);
  if (sys::llvm_fenv_testexcept()) {
    sys::llvm_fenv_clearexcept();
    return 0;
  }

  if (Ty->isFloatTy())
    return ConstantFP::get(Ty->getContext(), APFloat((float)V));
  if (Ty->isDoubleTy())
    return ConstantFP::get(Ty->getContext(), APFloat(V));
  llvm_unreachable("Can only constant fold float/double");
}

static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
                                      double V, double W, Type *Ty) {
  sys::llvm_fenv_clearexcept();
  V = NativeFP(V, W);
  if (sys::llvm_fenv_testexcept()) {
    sys::llvm_fenv_clearexcept();
    return 0;
  }

  if (Ty->isFloatTy())
    return ConstantFP::get(Ty->getContext(), APFloat((float)V));
  if (Ty->isDoubleTy())
    return ConstantFP::get(Ty->getContext(), APFloat(V));
  llvm_unreachable("Can only constant fold float/double");
}

/// ConstantFoldConvertToInt - Attempt an SSE floating point to integer
/// conversion of a constant floating point.  If roundTowardZero is false, the
/// default IEEE rounding is used (toward nearest, ties to even).  This matches
/// the behavior of the non-truncating SSE instructions in the default rounding
/// mode.  The desired integer type Ty is used to select how many bits are
/// available for the result.
/// Returns null if the conversion cannot be performed, otherwise returns the
/// Constant value resulting from the conversion.
static Constant *ConstantFoldConvertToInt(const APFloat &Val,
                                          bool roundTowardZero, Type *Ty) {
  // All of these conversion intrinsics form an integer of at most 64 bits.
  unsigned ResultWidth = cast<IntegerType>(Ty)->getBitWidth();
  assert(ResultWidth <= 64 &&
         "Can only constant fold conversions to 64 and 32 bit ints");

  uint64_t UIntVal;
  bool isExact = false;
  APFloat::roundingMode mode = roundTowardZero ? APFloat::rmTowardZero
                                               : APFloat::rmNearestTiesToEven;
  APFloat::opStatus status = Val.convertToInteger(&UIntVal, ResultWidth,
                                                  /*isSigned=*/true, mode,
                                                  &isExact);
  if (status != APFloat::opOK && status != APFloat::opInexact)
    return 0;
  return ConstantInt::get(Ty, UIntVal, /*isSigned=*/true);
}

/// ConstantFoldCall - Attempt to constant fold a call to the specified
/// function with the specified arguments, returning null if unsuccessful.
Constant *
llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
                       const TargetLibraryInfo *TLI) {
  if (!F->hasName()) return 0;
  StringRef Name = F->getName();

  Type *Ty = F->getReturnType();
  if (Operands.size() == 1) {
    if (ConstantFP *Op = dyn_cast<ConstantFP>(Operands[0])) {
      if (F->getIntrinsicID() == Intrinsic::convert_to_fp16) {
        APFloat Val(Op->getValueAPF());

        bool lost = false;
        Val.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &lost);

        return ConstantInt::get(F->getContext(), Val.bitcastToAPInt());
      }
      if (!TLI)
        return 0;

      if (!Ty->isFloatTy() && !Ty->isDoubleTy())
        return 0;

      /// We only fold functions with finite arguments.  Folding NaN and inf is
      /// likely to be aborted with an exception anyway, and some host libms
      /// have known errors raising exceptions.
      if (Op->getValueAPF().isNaN() || Op->getValueAPF().isInfinity())
        return 0;

      /// Currently APFloat versions of these functions do not exist, so we use
      /// the host native double versions.  Float versions are not called
      /// directly but for all these it is true (float)(f((double)arg)) ==
      /// f(arg).  Long double not supported yet.
      double V = Ty->isFloatTy() ? (double)Op->getValueAPF().convertToFloat() :
                                   Op->getValueAPF().convertToDouble();
      switch (Name[0]) {
      case 'a':
        if (Name == "acos" && TLI->has(LibFunc::acos))
          return ConstantFoldFP(acos, V, Ty);
        else if (Name == "asin" && TLI->has(LibFunc::asin))
          return ConstantFoldFP(asin, V, Ty);
        else if (Name == "atan" && TLI->has(LibFunc::atan))
          return ConstantFoldFP(atan, V, Ty);
        break;
      case 'c':
        if (Name == "ceil" && TLI->has(LibFunc::ceil))
          return ConstantFoldFP(ceil, V, Ty);
        else if (Name == "cos" && TLI->has(LibFunc::cos))
          return ConstantFoldFP(cos, V, Ty);
        else if (Name == "cosh" && TLI->has(LibFunc::cosh))
          return ConstantFoldFP(cosh, V, Ty);
        else if (Name == "cosf" && TLI->has(LibFunc::cosf))
          return ConstantFoldFP(cos, V, Ty);
        break;
      case 'e':
        if (Name == "exp" && TLI->has(LibFunc::exp))
          return ConstantFoldFP(exp, V, Ty);

        if (Name == "exp2" && TLI->has(LibFunc::exp2)) {
          // Constant fold exp2(x) as pow(2,x) in case the host doesn't have a
          // C99 library.
          return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
        }
        break;
      case 'f':
        if (Name == "fabs" && TLI->has(LibFunc::fabs))
          return ConstantFoldFP(fabs, V, Ty);
        else if (Name == "floor" && TLI->has(LibFunc::floor))
          return ConstantFoldFP(floor, V, Ty);
        break;
      case 'l':
        if (Name == "log" && V > 0 && TLI->has(LibFunc::log))
          return ConstantFoldFP(log, V, Ty);
        else if (Name == "log10" && V > 0 && TLI->has(LibFunc::log10))
          return ConstantFoldFP(log10, V, Ty);
        else if (F->getIntrinsicID() == Intrinsic::sqrt &&
                 (Ty->isFloatTy() || Ty->isDoubleTy())) {
          if (V >= -0.0)
            return ConstantFoldFP(sqrt, V, Ty);
          else // Undefined
            return Constant::getNullValue(Ty);
        }
        break;
      case 's':
        if (Name == "sin" && TLI->has(LibFunc::sin))
          return ConstantFoldFP(sin, V, Ty);
        else if (Name == "sinh" && TLI->has(LibFunc::sinh))
          return ConstantFoldFP(sinh, V, Ty);
        else if (Name == "sqrt" && V >= 0 && TLI->has(LibFunc::sqrt))
          return ConstantFoldFP(sqrt, V, Ty);
        else if (Name == "sqrtf" && V >= 0 && TLI->has(LibFunc::sqrtf))
          return ConstantFoldFP(sqrt, V, Ty);
        else if (Name == "sinf" && TLI->has(LibFunc::sinf))
          return ConstantFoldFP(sin, V, Ty);
        break;
      case 't':
        if (Name == "tan" && TLI->has(LibFunc::tan))
          return ConstantFoldFP(tan, V, Ty);
        else if (Name == "tanh" && TLI->has(LibFunc::tanh))
          return ConstantFoldFP(tanh, V, Ty);
        break;
      default:
        break;
      }
      return 0;
    }

    if (ConstantInt *Op = dyn_cast<ConstantInt>(Operands[0])) {
      switch (F->getIntrinsicID()) {
      case Intrinsic::bswap:
        return ConstantInt::get(F->getContext(), Op->getValue().byteSwap());
      case Intrinsic::ctpop:
        return ConstantInt::get(Ty, Op->getValue().countPopulation());
      case Intrinsic::convert_from_fp16: {
        APFloat Val(Op->getValue());

        bool lost = false;
        APFloat::opStatus status =
          Val.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven, &lost);

        // Conversion is always precise.
        (void)status;
        assert(status == APFloat::opOK && !lost &&
               "Precision lost during fp16 constfolding");

        return ConstantFP::get(F->getContext(), Val);
      }
      default:
        return 0;
      }
    }

    // Support ConstantVector in case we have an Undef in the top.
    if (isa<ConstantVector>(Operands[0]) ||
        isa<ConstantDataVector>(Operands[0])) {
      Constant *Op = cast<Constant>(Operands[0]);
      switch (F->getIntrinsicID()) {
      default: break;
      case Intrinsic::x86_sse_cvtss2si:
      case Intrinsic::x86_sse_cvtss2si64:
      case Intrinsic::x86_sse2_cvtsd2si:
      case Intrinsic::x86_sse2_cvtsd2si64:
        if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
          return ConstantFoldConvertToInt(FPOp->getValueAPF(),
                                          /*roundTowardZero=*/false, Ty);
      case Intrinsic::x86_sse_cvttss2si:
      case Intrinsic::x86_sse_cvttss2si64:
      case Intrinsic::x86_sse2_cvttsd2si:
      case Intrinsic::x86_sse2_cvttsd2si64:
        if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
          return ConstantFoldConvertToInt(FPOp->getValueAPF(),
                                          /*roundTowardZero=*/true, Ty);
      }
    }

    if (isa<UndefValue>(Operands[0])) {
      if (F->getIntrinsicID() == Intrinsic::bswap)
        return Operands[0];
      return 0;
    }

    return 0;
  }

  if (Operands.size() == 2) {
    if (ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
      if (!Ty->isFloatTy() && !Ty->isDoubleTy())
        return 0;
      double Op1V = Ty->isFloatTy() ?
                      (double)Op1->getValueAPF().convertToFloat() :
                      Op1->getValueAPF().convertToDouble();
      if (ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
        if (Op2->getType() != Op1->getType())
          return 0;

        double Op2V = Ty->isFloatTy() ?
                        (double)Op2->getValueAPF().convertToFloat() :
                        Op2->getValueAPF().convertToDouble();

        if (F->getIntrinsicID() == Intrinsic::pow) {
          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
        }
        if (!TLI)
          return 0;
        if (Name == "pow" && TLI->has(LibFunc::pow))
          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
        if (Name == "fmod" && TLI->has(LibFunc::fmod))
          return ConstantFoldBinaryFP(fmod, Op1V, Op2V, Ty);
        if (Name == "atan2" && TLI->has(LibFunc::atan2))
          return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
      } else if (ConstantInt *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
        if (F->getIntrinsicID() == Intrinsic::powi && Ty->isFloatTy())
          return ConstantFP::get(F->getContext(),
                                 APFloat((float)std::pow((float)Op1V,
                                                 (int)Op2C->getZExtValue())));
        if (F->getIntrinsicID() == Intrinsic::powi && Ty->isDoubleTy())
          return ConstantFP::get(F->getContext(),
                                 APFloat((double)std::pow((double)Op1V,
                                                 (int)Op2C->getZExtValue())));
      }
      return 0;
    }

    if (ConstantInt *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
      if (ConstantInt *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
        switch (F->getIntrinsicID()) {
        default: break;
        case Intrinsic::sadd_with_overflow:
        case Intrinsic::uadd_with_overflow:
        case Intrinsic::ssub_with_overflow:
        case Intrinsic::usub_with_overflow:
        case Intrinsic::smul_with_overflow:
        case Intrinsic::umul_with_overflow: {
          APInt Res;
          bool Overflow;
          switch (F->getIntrinsicID()) {
          default: llvm_unreachable("Invalid case");
          case Intrinsic::sadd_with_overflow:
            Res = Op1->getValue().sadd_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::uadd_with_overflow:
            Res = Op1->getValue().uadd_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::ssub_with_overflow:
            Res = Op1->getValue().ssub_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::usub_with_overflow:
            Res = Op1->getValue().usub_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::smul_with_overflow:
            Res = Op1->getValue().smul_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::umul_with_overflow:
            Res = Op1->getValue().umul_ov(Op2->getValue(), Overflow);
            break;
          }
          Constant *Ops[] = {
            ConstantInt::get(F->getContext(), Res),
            ConstantInt::get(Type::getInt1Ty(F->getContext()), Overflow)
          };
          return ConstantStruct::get(cast<StructType>(F->getReturnType()), Ops);
        }
        case Intrinsic::cttz:
          // FIXME: This should check for Op2 == 1, and become unreachable if
          // Op1 == 0.
          return ConstantInt::get(Ty, Op1->getValue().countTrailingZeros());
        case Intrinsic::ctlz:
          // FIXME: This should check for Op2 == 1, and become unreachable if
          // Op1 == 0.
          return ConstantInt::get(Ty, Op1->getValue().countLeadingZeros());
        }
      }

      return 0;
    }
    return 0;
  }
  return 0;
}