ValueTracking.cpp revision fb384d61c78b60787ed65475d8403aee65023962
//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/GlobalVariable.h"
#include "llvm/GlobalAlias.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Metadata.h"
#include "llvm/Operator.h"
#include "llvm/DataLayout.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/ADT/SmallPtrSet.h"
#include <cstring>
using namespace llvm;
using namespace llvm::PatternMatch;

const unsigned MaxDepth = 6;

/// getBitWidth - Returns the bitwidth of the given scalar or pointer type (if
/// unknown returns 0). For vector types, returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout *TD) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;
  assert(isa<PointerType>(Ty) && "Expected a pointer type!");
  return TD ? TD->getPointerSizeInBits() : 0;
}

static void ComputeMaskedBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
                                    APInt &KnownZero, APInt &KnownOne,
                                    APInt &KnownZero2, APInt &KnownOne2,
                                    const DataLayout *TD, unsigned Depth) {
  if (!Add) {
    if (ConstantInt *CLHS = dyn_cast<ConstantInt>(Op0)) {
      // We know that the top bits of C-X are clear if X contains fewer bits
      // than C (i.e. no wrap-around can happen). For example, 20-X is
      // positive if we can prove that X is >= 0 and < 16.
      if (!CLHS->getValue().isNegative()) {
        unsigned BitWidth = KnownZero.getBitWidth();
        unsigned NLZ = (CLHS->getValue()+1).countLeadingZeros();
        // NLZ can't be BitWidth with no sign bit.
        APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
        llvm::ComputeMaskedBits(Op1, KnownZero2, KnownOne2, TD, Depth+1);

        // If all of the MaskV bits are known to be zero, then we know the
        // output top bits are zero, because we now know that the output is
        // from [0-C].
        if ((KnownZero2 & MaskV) == MaskV) {
          unsigned NLZ2 = CLHS->getValue().countLeadingZeros();
          // Top bits known zero.
          KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
        }
      }
    }
  }

  unsigned BitWidth = KnownZero.getBitWidth();

  // If one of the operands has trailing zeros, then the bits that the
  // other operand has in those bit positions will be preserved in the
  // result. For an add, this works with either operand. For a subtract,
  // this only works if the known zeros are in the right operand.
  APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
  llvm::ComputeMaskedBits(Op0, LHSKnownZero, LHSKnownOne, TD, Depth+1);
  assert((LHSKnownZero & LHSKnownOne) == 0 &&
         "Bits known to be one AND zero?");
  unsigned LHSKnownZeroOut = LHSKnownZero.countTrailingOnes();

  llvm::ComputeMaskedBits(Op1, KnownZero2, KnownOne2, TD, Depth+1);
  assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
  unsigned RHSKnownZeroOut = KnownZero2.countTrailingOnes();

  // Determine which operand has more trailing zeros, and use that
  // many bits from the other operand.
  if (LHSKnownZeroOut > RHSKnownZeroOut) {
    if (Add) {
      APInt Mask = APInt::getLowBitsSet(BitWidth, LHSKnownZeroOut);
      KnownZero |= KnownZero2 & Mask;
      KnownOne  |= KnownOne2 & Mask;
    } else {
      // If the known zeros are in the left operand for a subtract,
      // fall back to the minimum known zeros in both operands.
      KnownZero |= APInt::getLowBitsSet(BitWidth,
                                        std::min(LHSKnownZeroOut,
                                                 RHSKnownZeroOut));
    }
  } else if (RHSKnownZeroOut >= LHSKnownZeroOut) {
    APInt Mask = APInt::getLowBitsSet(BitWidth, RHSKnownZeroOut);
    KnownZero |= LHSKnownZero & Mask;
    KnownOne  |= LHSKnownOne & Mask;
  }

  // Are we still trying to solve for the sign bit?
  if (!KnownZero.isNegative() && !KnownOne.isNegative()) {
    if (NSW) {
      if (Add) {
        // Adding two positive numbers can't wrap into negative
        if (LHSKnownZero.isNegative() && KnownZero2.isNegative())
          KnownZero |= APInt::getSignBit(BitWidth);
        // and adding two negative numbers can't wrap into positive.
        else if (LHSKnownOne.isNegative() && KnownOne2.isNegative())
          KnownOne |= APInt::getSignBit(BitWidth);
      } else {
        // Subtracting a negative number from a positive one can't wrap
        if (LHSKnownZero.isNegative() && KnownOne2.isNegative())
          KnownZero |= APInt::getSignBit(BitWidth);
        // neither can subtracting a positive number from a negative one.
        else if (LHSKnownOne.isNegative() && KnownZero2.isNegative())
          KnownOne |= APInt::getSignBit(BitWidth);
      }
    }
  }
}
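
// Illustrative usage sketch, not part of the original file: the
// trailing-zero logic above is what lets known-bits analysis track integer
// and pointer alignment through additions. The helper name and the scenario
// (an add of two aligned values) are assumptions for illustration only.
static unsigned exampleKnownTrailingZerosOfSum(Value *Sum,
                                               const DataLayout *TD) {
  unsigned BitWidth = getBitWidth(Sum->getType(), TD);
  if (!BitWidth) return 0;
  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  ComputeMaskedBits(Sum, KnownZero, KnownOne, TD);
  // For %sum = add i32 %a, %b with %a a multiple of 8 and %b a multiple of
  // 4, this returns at least 2: the smaller operand alignment survives.
  return KnownZero.countTrailingOnes();
}
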
static void ComputeMaskedBitsMul(Value *Op0, Value *Op1, bool NSW,
                                 APInt &KnownZero, APInt &KnownOne,
                                 APInt &KnownZero2, APInt &KnownOne2,
                                 const DataLayout *TD, unsigned Depth) {
  unsigned BitWidth = KnownZero.getBitWidth();
  ComputeMaskedBits(Op1, KnownZero, KnownOne, TD, Depth+1);
  ComputeMaskedBits(Op0, KnownZero2, KnownOne2, TD, Depth+1);
  assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
  assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = KnownZero.isNegative();
      bool isKnownNonNegativeOp0 = KnownZero2.isNegative();
      bool isKnownNegativeOp1 = KnownOne.isNegative();
      bool isKnownNegativeOp0 = KnownOne2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
        (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
                           isKnownNonZero(Op0, TD, Depth)) ||
                          (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
                           isKnownNonZero(Op1, TD, Depth));
    }
  }

  // If low bits are zero in either operand, output low known-0 bits.
  // Also compute a conservative estimate for high known-0 bits.
  // More trickiness is possible, but this is sufficient for the
  // interesting case of alignment computation.
  KnownOne.clearAllBits();
  unsigned TrailZ = KnownZero.countTrailingOnes() +
                    KnownZero2.countTrailingOnes();
  unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
                            KnownZero2.countLeadingOnes(),
                            BitWidth) - BitWidth;

  TrailZ = std::min(TrailZ, BitWidth);
  LeadZ = std::min(LeadZ, BitWidth);
  KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
              APInt::getHighBitsSet(BitWidth, LeadZ);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !KnownOne.isNegative())
    KnownZero.setBit(BitWidth - 1);
  else if (isKnownNegative && !KnownZero.isNegative())
    KnownOne.setBit(BitWidth - 1);
}

void llvm::computeMaskedBitsLoad(const MDNode &Ranges, APInt &KnownZero) {
  unsigned BitWidth = KnownZero.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  // Use the high end of the ranges to find leading zeros.
  unsigned MinLeadingZeros = BitWidth;
  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower = cast<ConstantInt>(Ranges.getOperand(2*i + 0));
    ConstantInt *Upper = cast<ConstantInt>(Ranges.getOperand(2*i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());
    if (Range.isWrappedSet())
      MinLeadingZeros = 0; // -1 has no zeros
    unsigned LeadingZeros = (Upper->getValue() - 1).countLeadingZeros();
    MinLeadingZeros = std::min(LeadingZeros, MinLeadingZeros);
  }

  KnownZero = APInt::getHighBitsSet(BitWidth, MinLeadingZeros);
}
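
// Illustrative sketch, not part of the original file: feeding a load's
// !range metadata to the hook above. The metadata shape is the documented
// one (pairs of [Lower, Upper) ConstantInts); the helper name is
// hypothetical. For a load of i32 annotated !{i32 0, i32 16}, the 28 high
// bits of KnownZero come back set, since Upper-1 == 15 has 28 leading zeros.
static APInt exampleRangeKnownZero(LoadInst *LI) {
  assert(LI->getType()->isIntegerTy() && "hypothetical precondition");
  APInt KnownZero(LI->getType()->getScalarSizeInBits(), 0);
  if (MDNode *Ranges = LI->getMetadata(LLVMContext::MD_range))
    computeMaskedBitsLoad(*Ranges, KnownZero);
  return KnownZero;
}
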
/// ComputeMaskedBits - Determine which of the bits are known to be either zero
/// or one and return them in the KnownZero/KnownOne bit sets.
///
/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
/// we cannot optimize based on the assumption that it is zero without changing
/// it to be an explicit zero. If we don't change it to zero, other code could
/// be optimized based on the contradictory assumption that it is non-zero.
/// Because instcombine aggressively folds operations with undef args anyway,
/// this won't lose us code quality.
///
/// This function is defined on values with integer type, values with pointer
/// type (but only if TD is non-null), and vectors of integers. In the case
/// where V is a vector, the known zero and known one values are the same width
/// as the vector element, and the bit is set only if it is true for all of the
/// elements in the vector.
void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
                             const DataLayout *TD, unsigned Depth) {
  assert(V && "No Value?");
  assert(Depth <= MaxDepth && "Limit Search Depth");
  unsigned BitWidth = KnownZero.getBitWidth();

  assert((V->getType()->isIntOrIntVectorTy() ||
          V->getType()->getScalarType()->isPointerTy()) &&
         "Not integer or pointer type!");
  assert((!TD ||
          TD->getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
         (!V->getType()->isIntOrIntVectorTy() ||
          V->getType()->getScalarSizeInBits() == BitWidth) &&
         KnownZero.getBitWidth() == BitWidth &&
         KnownOne.getBitWidth() == BitWidth &&
         "V, KnownOne and KnownZero should have same BitWidth");

  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    // We know all of the bits for a constant!
    KnownOne = CI->getValue();
    KnownZero = ~KnownOne;
    return;
  }
  // Null and aggregate-zero are all-zeros.
  if (isa<ConstantPointerNull>(V) ||
      isa<ConstantAggregateZero>(V)) {
    KnownOne.clearAllBits();
    KnownZero = APInt::getAllOnesValue(BitWidth);
    return;
  }
  // Handle a constant vector by taking the intersection of the known bits of
  // each element. There is no real need to handle ConstantVector here, because
  // we don't handle undef in any particularly useful way.
  if (ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
    // We know that CDS must be a vector of integers. Take the intersection of
    // each element.
    KnownZero.setAllBits(); KnownOne.setAllBits();
    APInt Elt(KnownZero.getBitWidth(), 0);
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      Elt = CDS->getElementAsInteger(i);
      KnownZero &= ~Elt;
      KnownOne &= Elt;
    }
    return;
  }

  // The address of an aligned GlobalValue has trailing zeros.
  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    unsigned Align = GV->getAlignment();
    if (Align == 0 && TD) {
      if (GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV)) {
        Type *ObjectType = GVar->getType()->getElementType();
        if (ObjectType->isSized()) {
          // If the object is defined in the current Module, we'll be giving
          // it the preferred alignment. Otherwise, we have to assume that it
          // may only have the minimum ABI alignment.
          if (!GVar->isDeclaration() && !GVar->isWeakForLinker())
            Align = TD->getPreferredAlignment(GVar);
          else
            Align = TD->getABITypeAlignment(ObjectType);
        }
      }
    }
    if (Align > 0)
      KnownZero = APInt::getLowBitsSet(BitWidth,
                                       CountTrailingZeros_32(Align));
    else
      KnownZero.clearAllBits();
    KnownOne.clearAllBits();
    return;
  }
  // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
  // the bits of its aliasee.
  if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
    if (GA->mayBeOverridden()) {
      KnownZero.clearAllBits(); KnownOne.clearAllBits();
    } else {
      ComputeMaskedBits(GA->getAliasee(), KnownZero, KnownOne, TD, Depth+1);
    }
    return;
  }

  if (Argument *A = dyn_cast<Argument>(V)) {
    unsigned Align = 0;

    if (A->hasByValAttr()) {
      // Get alignment information off byval arguments if specified in the IR.
      Align = A->getParamAlignment();
    } else if (TD && A->hasStructRetAttr()) {
      // An sret parameter has at least the ABI alignment of the return type.
      Type *EltTy = cast<PointerType>(A->getType())->getElementType();
      if (EltTy->isSized())
        Align = TD->getABITypeAlignment(EltTy);
    }

    if (Align)
      KnownZero = APInt::getLowBitsSet(BitWidth, CountTrailingZeros_32(Align));
    return;
  }

  // Start out not knowing anything.
  KnownZero.clearAllBits(); KnownOne.clearAllBits();

  if (Depth == MaxDepth)
    return;  // Limit search depth.

  Operator *I = dyn_cast<Operator>(V);
  if (!I) return;

  APInt KnownZero2(KnownZero), KnownOne2(KnownOne);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
      computeMaskedBitsLoad(*MD, KnownZero);
    return;
  case Instruction::And: {
    // If either the LHS or the RHS is zero, the result is zero.
    ComputeMaskedBits(I->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
    ComputeMaskedBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 bits are known to be clear if zero in either the LHS | RHS.
    KnownZero |= KnownZero2;
    return;
  }
  case Instruction::Or: {
    ComputeMaskedBits(I->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
    ComputeMaskedBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 bits are known to be set if set in either the LHS | RHS.
    KnownOne |= KnownOne2;
    return;
  }
  case Instruction::Xor: {
    ComputeMaskedBits(I->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
    ComputeMaskedBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 bits are known to be set if set in only one of the LHS, RHS.
    KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
    KnownZero = KnownZeroOut;
    return;
  }
  case Instruction::Mul: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    ComputeMaskedBitsMul(I->getOperand(0), I->getOperand(1), NSW,
                         KnownZero, KnownOne, KnownZero2, KnownOne2, TD, Depth);
    break;
  }
  case Instruction::UDiv: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
    ComputeMaskedBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
    unsigned LeadZ = KnownZero2.countLeadingOnes();

    KnownOne2.clearAllBits();
    KnownZero2.clearAllBits();
    ComputeMaskedBits(I->getOperand(1), KnownZero2, KnownOne2, TD, Depth+1);
    unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
    if (RHSUnknownLeadingOnes != BitWidth)
      LeadZ = std::min(BitWidth,
                       LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);

    KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
    return;
  }
  case Instruction::Select:
    ComputeMaskedBits(I->getOperand(2), KnownZero, KnownOne, TD, Depth+1);
    ComputeMaskedBits(I->getOperand(1), KnownZero2, KnownOne2, TD, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    return;
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    return; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // We can't handle these if we don't know the pointer size.
    if (!TD) return;
    // FALL THROUGH and handle them the same as zext/trunc.
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    if (SrcTy->isPointerTy())
      SrcBitWidth = TD->getTypeSizeInBits(SrcTy);
    else
      SrcBitWidth = SrcTy->getScalarSizeInBits();

    KnownZero = KnownZero.zextOrTrunc(SrcBitWidth);
    KnownOne = KnownOne.zextOrTrunc(SrcBitWidth);
    ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
    KnownZero = KnownZero.zextOrTrunc(BitWidth);
    KnownOne = KnownOne.zextOrTrunc(BitWidth);
    // Any top bits are known to be zero.
    if (BitWidth > SrcBitWidth)
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    return;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
      return;
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    KnownZero = KnownZero.trunc(SrcBitWidth);
    KnownOne = KnownOne.trunc(SrcBitWidth);
    ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    if (KnownZero[SrcBitWidth-1])             // Input sign bit known zero
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    else if (KnownOne[SrcBitWidth-1])         // Input sign bit known set
      KnownOne |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    return;
  }
  case Instruction::Shl:
    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
      ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero <<= ShiftAmt;
      KnownOne  <<= ShiftAmt;
      KnownZero |= APInt::getLowBitsSet(BitWidth, ShiftAmt); // low bits known 0
      return;
    }
    break;
  case Instruction::LShr:
    // (ushr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      // Compute the new bits that are at the top now.
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);

      // Unsigned shift right.
      ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
      KnownOne  = APIntOps::lshr(KnownOne, ShiftAmt);
      // High bits known zero.
      KnownZero |= APInt::getHighBitsSet(BitWidth, ShiftAmt);
      return;
    }
    break;
  case Instruction::AShr:
    // (ashr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      // Compute the new bits that are at the top now.
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);

      // Signed shift right.
      ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
      KnownOne  = APIntOps::lshr(KnownOne, ShiftAmt);

      APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
      if (KnownZero[BitWidth-ShiftAmt-1])      // New bits are known zero.
        KnownZero |= HighBits;
      else if (KnownOne[BitWidth-ShiftAmt-1])  // New bits are known one.
        KnownOne |= HighBits;
      return;
    }
    break;
  case Instruction::Sub: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    ComputeMaskedBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                            KnownZero, KnownOne, KnownZero2, KnownOne2, TD,
                            Depth);
    break;
  }
  case Instruction::Add: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    ComputeMaskedBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                            KnownZero, KnownOne, KnownZero2, KnownOne2, TD,
                            Depth);
    break;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        ComputeMaskedBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);

        // The low bits of the first operand are unchanged by the srem.
        KnownZero = KnownZero2 & LowBits;
        KnownOne = KnownOne2 & LowBits;

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
        if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
          KnownZero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero, then
        // the upper bits are all one.
        if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
          KnownOne |= ~LowBits;

        assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      }
    }

    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero.
    if (KnownZero.isNonNegative()) {
      APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
      ComputeMaskedBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, TD,
                        Depth+1);
      // If it's known zero, our sign bit is also zero.
      if (LHSKnownZero.isNegative())
        KnownZero.setBit(BitWidth - 1);
    }

    break;
  case Instruction::URem: {
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD,
                          Depth+1);
        assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
        KnownZero |= ~LowBits;
        KnownOne &= LowBits;
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
    ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
    ComputeMaskedBits(I->getOperand(1), KnownZero2, KnownOne2, TD, Depth+1);

    unsigned Leaders = std::max(KnownZero.countLeadingOnes(),
                                KnownZero2.countLeadingOnes());
    KnownOne.clearAllBits();
    KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
    break;
  }

  case Instruction::Alloca: {
    AllocaInst *AI = cast<AllocaInst>(V);
    unsigned Align = AI->getAlignment();
    if (Align == 0 && TD)
      Align = TD->getABITypeAlignment(AI->getType()->getElementType());

    if (Align > 0)
      KnownZero = APInt::getLowBitsSet(BitWidth, CountTrailingZeros_32(Align));
    break;
  }
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    APInt LocalKnownZero(BitWidth, 0), LocalKnownOne(BitWidth, 0);
    ComputeMaskedBits(I->getOperand(0), LocalKnownZero, LocalKnownOne, TD,
                      Depth+1);
    unsigned TrailZ = LocalKnownZero.countTrailingOnes();

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      Value *Index = I->getOperand(i);
      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
        // Handle struct member offset arithmetic.
        if (!TD) return;
        const StructLayout *SL = TD->getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        uint64_t Offset = SL->getElementOffset(Idx);
        TrailZ = std::min(TrailZ,
                          CountTrailingZeros_64(Offset));
      } else {
        // Handle array index arithmetic.
        Type *IndexedTy = GTI.getIndexedType();
        if (!IndexedTy->isSized()) return;
        unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
        uint64_t TypeSize = TD ?
            TD->getTypeAllocSize(IndexedTy) : 1;
        LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
        ComputeMaskedBits(Index, LocalKnownZero, LocalKnownOne, TD, Depth+1);
        TrailZ = std::min(TrailZ,
                          unsigned(CountTrailingZeros_64(TypeSize) +
                                   LocalKnownZero.countTrailingOnes()));
      }
    }

    KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ);
    break;
  }
  case Instruction::PHI: {
    PHINode *P = cast<PHINode>(I);
    // Handle the case of a simple two-predecessor recurrence PHI.
    // There's a lot more that could theoretically be done here, but
    // this is sufficient to catch some interesting cases.
    if (P->getNumIncomingValues() == 2) {
      for (unsigned i = 0; i != 2; ++i) {
        Value *L = P->getIncomingValue(i);
        Value *R = P->getIncomingValue(!i);
        Operator *LU = dyn_cast<Operator>(L);
        if (!LU)
          continue;
        unsigned Opcode = LU->getOpcode();
        // Check for operations that have the property that if
        // both their operands have low zero bits, the result
        // will have low zero bits.
        if (Opcode == Instruction::Add ||
            Opcode == Instruction::Sub ||
            Opcode == Instruction::And ||
            Opcode == Instruction::Or ||
            Opcode == Instruction::Mul) {
          Value *LL = LU->getOperand(0);
          Value *LR = LU->getOperand(1);
          // Find a recurrence.
          if (LL == I)
            L = LR;
          else if (LR == I)
            L = LL;
          else
            break;
          // Ok, we have a PHI of the form L op= R. Check for low
          // zero bits.
          ComputeMaskedBits(R, KnownZero2, KnownOne2, TD, Depth+1);

          // We need to take the minimum number of known bits.
          APInt KnownZero3(KnownZero), KnownOne3(KnownOne);
          ComputeMaskedBits(L, KnownZero3, KnownOne3, TD, Depth+1);

          KnownZero = APInt::getLowBitsSet(BitWidth,
                                           std::min(KnownZero2.countTrailingOnes(),
                                                    KnownZero3.countTrailingOnes()));
          break;
        }
      }
    }

    // Unreachable blocks may have zero-operand PHI nodes.
    if (P->getNumIncomingValues() == 0)
      return;

    // Otherwise take the unions of the known bit sets of the operands,
    // taking conservative care to avoid excessive recursion.
    if (Depth < MaxDepth - 1 && !KnownZero && !KnownOne) {
      // Skip if every incoming value refers back to the PHI itself.
      if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
        break;

      KnownZero = APInt::getAllOnesValue(BitWidth);
      KnownOne = APInt::getAllOnesValue(BitWidth);
      for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i) {
        // Skip direct self references.
        if (P->getIncomingValue(i) == P) continue;

        KnownZero2 = APInt(BitWidth, 0);
        KnownOne2 = APInt(BitWidth, 0);
        // Recurse, but cap the recursion to one level, because we don't
        // want to waste time spinning around in loops.
        ComputeMaskedBits(P->getIncomingValue(i), KnownZero2, KnownOne2, TD,
                          MaxDepth-1);
        KnownZero &= KnownZero2;
        KnownOne &= KnownOne2;
        // If all bits have been ruled out, there's no need to check
        // more operands.
        if (!KnownZero && !KnownOne)
          break;
      }
    }
    break;
  }
  case Instruction::Call:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::ctlz:
      case Intrinsic::cttz: {
        unsigned LowBits = Log2_32(BitWidth)+1;
        // If this call is undefined for 0, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          LowBits -= 1;
        KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
        break;
      }
      case Intrinsic::ctpop: {
        unsigned LowBits = Log2_32(BitWidth)+1;
        KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
        break;
      }
      case Intrinsic::x86_sse42_crc32_64_8:
      case Intrinsic::x86_sse42_crc32_64_64:
        KnownZero = APInt::getHighBitsSet(64, 32);
        break;
      }
    }
    break;
  case Instruction::ExtractValue:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
      ExtractValueInst *EVI = cast<ExtractValueInst>(I);
      if (EVI->getNumIndices() != 1) break;
      if (EVI->getIndices()[0] == 0) {
        switch (II->getIntrinsicID()) {
        default: break;
        case Intrinsic::uadd_with_overflow:
        case Intrinsic::sadd_with_overflow:
          ComputeMaskedBitsAddSub(true, II->getArgOperand(0),
                                  II->getArgOperand(1), false, KnownZero,
                                  KnownOne, KnownZero2, KnownOne2, TD, Depth);
          break;
        case Intrinsic::usub_with_overflow:
        case Intrinsic::ssub_with_overflow:
          ComputeMaskedBitsAddSub(false, II->getArgOperand(0),
                                  II->getArgOperand(1), false, KnownZero,
                                  KnownOne, KnownZero2, KnownOne2, TD, Depth);
          break;
        case Intrinsic::umul_with_overflow:
        case Intrinsic::smul_with_overflow:
          ComputeMaskedBitsMul(II->getArgOperand(0), II->getArgOperand(1),
                               false, KnownZero, KnownOne,
                               KnownZero2, KnownOne2, TD, Depth);
          break;
        }
      }
    }
  }
}
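
// Illustrative usage sketch, not part of the original file: the typical
// calling convention for ComputeMaskedBits. Both APInts must be
// zero-initialized at the value's bit width; on return the two masks never
// overlap. For a constant such as i8 15, KnownOne == 0x0F and
// KnownZero == 0xF0. The helper name is hypothetical.
static void exampleQueryKnownBits(Value *V, const DataLayout *TD,
                                  APInt &KnownZero, APInt &KnownOne) {
  unsigned BitWidth = getBitWidth(V->getType(), TD);
  assert(BitWidth && "Pointer width is unknown without a DataLayout");
  KnownZero = KnownOne = APInt(BitWidth, 0);
  ComputeMaskedBits(V, KnownZero, KnownOne, TD);
  assert((KnownZero & KnownOne) == 0 && "The masks never overlap");
}
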
/// ComputeSignBit - Determine whether the sign bit is known to be zero or
/// one. Convenience wrapper around ComputeMaskedBits.
void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
                          const DataLayout *TD, unsigned Depth) {
  unsigned BitWidth = getBitWidth(V->getType(), TD);
  if (!BitWidth) {
    KnownZero = false;
    KnownOne = false;
    return;
  }
  APInt ZeroBits(BitWidth, 0);
  APInt OneBits(BitWidth, 0);
  ComputeMaskedBits(V, ZeroBits, OneBits, TD, Depth);
  KnownOne = OneBits[BitWidth - 1];
  KnownZero = ZeroBits[BitWidth - 1];
}
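
// Illustrative sketch, not part of the original file: a signed
// non-negativity test built on ComputeSignBit. The helper name is
// hypothetical; isKnownNonZero below uses the same pattern.
static bool exampleIsKnownNonNegative(Value *V, const DataLayout *TD) {
  bool SignKnownZero, SignKnownOne;
  ComputeSignBit(V, SignKnownZero, SignKnownOne, TD);
  return SignKnownZero; // Sign bit proven clear => V >= 0 as a signed value.
}
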
/// isPowerOfTwo - Return true if the given value is known to have exactly one
/// bit set when defined. For vectors return true if every element is known to
/// be a power of two when defined. Supports values with integer or pointer
/// types and vectors of integers.
bool llvm::isPowerOfTwo(Value *V, const DataLayout *TD, bool OrZero,
                        unsigned Depth) {
  if (Constant *C = dyn_cast<Constant>(V)) {
    if (C->isNullValue())
      return OrZero;
    if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
      return CI->getValue().isPowerOf2();
    // TODO: Handle vector constants.
  }

  // 1 << X is clearly a power of two if the one is not shifted off the end. If
  // it is shifted off the end then the result is undefined.
  if (match(V, m_Shl(m_One(), m_Value())))
    return true;

  // (signbit) >>l X is clearly a power of two if the one is not shifted off
  // the bottom. If it is shifted off the bottom then the result is undefined.
  if (match(V, m_LShr(m_SignBit(), m_Value())))
    return true;

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxDepth)
    return false;

  Value *X = 0, *Y = 0;
  // A shift of a power of two is a power of two or zero.
  if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
                 match(V, m_Shr(m_Value(X), m_Value()))))
    return isPowerOfTwo(X, TD, /*OrZero*/true, Depth);

  if (ZExtInst *ZI = dyn_cast<ZExtInst>(V))
    return isPowerOfTwo(ZI->getOperand(0), TD, OrZero, Depth);

  if (SelectInst *SI = dyn_cast<SelectInst>(V))
    return isPowerOfTwo(SI->getTrueValue(), TD, OrZero, Depth) &&
           isPowerOfTwo(SI->getFalseValue(), TD, OrZero, Depth);

  if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
    // A power of two and'd with anything is a power of two or zero.
    if (isPowerOfTwo(X, TD, /*OrZero*/true, Depth) ||
        isPowerOfTwo(Y, TD, /*OrZero*/true, Depth))
      return true;
    // X & (-X) is always a power of two or zero.
    if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
      return true;
    return false;
  }

  // An exact divide or right shift can only shift off zero bits, so the result
  // is a power of two only if the first operand is a power of two and not
  // copying a sign bit (sdiv int_min, 2).
  if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
      match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
    return isPowerOfTwo(cast<Operator>(V)->getOperand(0), TD, OrZero, Depth);
  }

  return false;
}
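
// Illustrative sketch, not part of the original file: isPowerOfTwo proves
// facts like "1 << %n is a power of two" without knowing %n, and with
// OrZero=true it also accepts patterns such as (%x & -%x) that may evaluate
// to zero. The helper name is hypothetical.
static bool examplePow2OrZero(Value *V, const DataLayout *TD) {
  return isPowerOfTwo(V, TD, /*OrZero*/true);
}
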
/// isKnownNonZero - Return true if the given value is known to be non-zero
/// when defined. For vectors return true if every element is known to be
/// non-zero when defined. Supports values with integer or pointer type and
/// vectors of integers.
bool llvm::isKnownNonZero(Value *V, const DataLayout *TD, unsigned Depth) {
  if (Constant *C = dyn_cast<Constant>(V)) {
    if (C->isNullValue())
      return false;
    if (isa<ConstantInt>(C))
      // Must be non-zero due to null test above.
      return true;
    // TODO: Handle vectors
    return false;
  }

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ >= MaxDepth)
    return false;

  unsigned BitWidth = getBitWidth(V->getType(), TD);

  // X | Y != 0 if X != 0 or Y != 0.
  Value *X = 0, *Y = 0;
  if (match(V, m_Or(m_Value(X), m_Value(Y))))
    return isKnownNonZero(X, TD, Depth) || isKnownNonZero(Y, TD, Depth);

  // ext X != 0 if X != 0.
  if (isa<SExtInst>(V) || isa<ZExtInst>(V))
    return isKnownNonZero(cast<Instruction>(V)->getOperand(0), TD, Depth);

  // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
  // if the lowest bit is shifted off the end.
  if (BitWidth && match(V, m_Shl(m_Value(X), m_Value(Y)))) {
    // shl nuw can't remove any non-zero bits.
    OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
    if (BO->hasNoUnsignedWrap())
      return isKnownNonZero(X, TD, Depth);

    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(X, KnownZero, KnownOne, TD, Depth);
    if (KnownOne[0])
      return true;
  }
  // shr X, Y != 0 if X is negative. Note that the value of the shift is not
  // defined if the sign bit is shifted off the end.
  else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
    // shr exact can only shift out zero bits.
    PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
    if (BO->isExact())
      return isKnownNonZero(X, TD, Depth);

    bool XKnownNonNegative, XKnownNegative;
    ComputeSignBit(X, XKnownNonNegative, XKnownNegative, TD, Depth);
    if (XKnownNegative)
      return true;
  }
  // div exact can only produce a zero if the dividend is zero.
  else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
    return isKnownNonZero(X, TD, Depth);
  }
  // X + Y.
  else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
    bool XKnownNonNegative, XKnownNegative;
    bool YKnownNonNegative, YKnownNegative;
    ComputeSignBit(X, XKnownNonNegative, XKnownNegative, TD, Depth);
    ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, TD, Depth);

    // If X and Y are both non-negative (as signed values) then their sum is
    // not zero unless both X and Y are zero.
    if (XKnownNonNegative && YKnownNonNegative)
      if (isKnownNonZero(X, TD, Depth) || isKnownNonZero(Y, TD, Depth))
        return true;

    // If X and Y are both negative (as signed values) then their sum is not
    // zero unless both X and Y equal INT_MIN.
    if (BitWidth && XKnownNegative && YKnownNegative) {
      APInt KnownZero(BitWidth, 0);
      APInt KnownOne(BitWidth, 0);
      APInt Mask = APInt::getSignedMaxValue(BitWidth);
      // The sign bit of X is set. If some other bit is set then X is not
      // equal to INT_MIN.
      ComputeMaskedBits(X, KnownZero, KnownOne, TD, Depth);
      if ((KnownOne & Mask) != 0)
        return true;
      // The sign bit of Y is set. If some other bit is set then Y is not
      // equal to INT_MIN.
      ComputeMaskedBits(Y, KnownZero, KnownOne, TD, Depth);
      if ((KnownOne & Mask) != 0)
        return true;
    }

    // The sum of a non-negative number and a power of two is not zero.
    if (XKnownNonNegative && isPowerOfTwo(Y, TD, /*OrZero*/false, Depth))
      return true;
    if (YKnownNonNegative && isPowerOfTwo(X, TD, /*OrZero*/false, Depth))
      return true;
  }
  // X * Y.
  else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
    OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
    // If X and Y are non-zero then so is X * Y as long as the multiplication
    // does not overflow.
    if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) &&
        isKnownNonZero(X, TD, Depth) && isKnownNonZero(Y, TD, Depth))
      return true;
  }
  // (C ? X : Y) != 0 if X != 0 and Y != 0.
  else if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
    if (isKnownNonZero(SI->getTrueValue(), TD, Depth) &&
        isKnownNonZero(SI->getFalseValue(), TD, Depth))
      return true;
  }

  if (!BitWidth) return false;
  APInt KnownZero(BitWidth, 0);
  APInt KnownOne(BitWidth, 0);
  ComputeMaskedBits(V, KnownZero, KnownOne, TD, Depth);
  return KnownOne != 0;
}
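
// Illustrative sketch, not part of the original file: a typical client-side
// check that a division cannot trap on a zero denominator, e.g. because the
// denominator is "or %x, 1" or matches another case handled above. The
// helper name and the udiv-only restriction are assumptions.
static bool exampleUDivIsSafe(BinaryOperator *Div, const DataLayout *TD) {
  assert(Div->getOpcode() == Instruction::UDiv && "hypothetical precondition");
  return isKnownNonZero(Div->getOperand(1), TD);
}
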
/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
/// this predicate to simplify operations downstream. Mask is known to be zero
/// for bits that V cannot have.
///
/// This function is defined on values with integer type, values with pointer
/// type (but only if TD is non-null), and vectors of integers. In the case
/// where V is a vector, the mask, known zero, and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask,
                             const DataLayout *TD, unsigned Depth) {
  APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0);
  ComputeMaskedBits(V, KnownZero, KnownOne, TD, Depth);
  assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
  return (KnownZero & Mask) == Mask;
}
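
// Illustrative sketch, not part of the original file: MaskedValueIsZero is
// the usual way to ask "is %v a multiple of 8?" -- mask the low three bits
// and require that they are provably zero. The helper name is hypothetical.
static bool exampleIsMultipleOfEight(Value *V, const DataLayout *TD) {
  unsigned BitWidth = getBitWidth(V->getType(), TD);
  if (!BitWidth) return false;
  return MaskedValueIsZero(V, APInt::getLowBitsSet(BitWidth, 3), TD);
}
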
/// ComputeNumSignBits - Return the number of times the sign bit of the
/// register is replicated into the other bits. We know that at least 1 bit
/// is always equal to the sign bit (itself), but other cases can give us
/// information. For example, immediately after an "ashr X, 2", we know that
/// the top 3 bits are all equal to each other, so we return 3.
///
/// 'Op' must have a scalar integer type.
///
unsigned llvm::ComputeNumSignBits(Value *V, const DataLayout *TD,
                                  unsigned Depth) {
  assert((TD || V->getType()->isIntOrIntVectorTy()) &&
         "ComputeNumSignBits requires a DataLayout object to operate "
         "on non-integer values!");
  Type *Ty = V->getType();
  unsigned TyBits = TD ? TD->getTypeSizeInBits(V->getType()->getScalarType()) :
                    Ty->getScalarSizeInBits();
  unsigned Tmp, Tmp2;
  unsigned FirstAnswer = 1;

  // Note that ConstantInt is handled by the general ComputeMaskedBits case
  // below.

  if (Depth == 6)
    return 1;  // Limit search depth.

  Operator *U = dyn_cast<Operator>(V);
  switch (Operator::getOpcode(V)) {
  default: break;
  case Instruction::SExt:
    Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
    return ComputeNumSignBits(U->getOperand(0), TD, Depth+1) + Tmp;

  case Instruction::AShr: {
    Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
    // ashr X, C   -> adds C sign bits. Vectors too.
    const APInt *ShAmt;
    if (match(U->getOperand(1), m_APInt(ShAmt))) {
      Tmp += ShAmt->getZExtValue();
      if (Tmp > TyBits) Tmp = TyBits;
    }
    return Tmp;
  }
  case Instruction::Shl: {
    const APInt *ShAmt;
    if (match(U->getOperand(1), m_APInt(ShAmt))) {
      // shl destroys sign bits.
      Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
      Tmp2 = ShAmt->getZExtValue();
      if (Tmp2 >= TyBits ||      // Bad shift.
          Tmp2 >= Tmp) break;    // Shifted all sign bits out.
      return Tmp - Tmp2;
    }
    break;
  }
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:    // NOT is handled here.
    // Logical binary ops preserve the number of sign bits at the worst.
    Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
    if (Tmp != 1) {
      Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
      FirstAnswer = std::min(Tmp, Tmp2);
      // We computed what we know about the sign bits as our first
      // answer. Now proceed to the generic code that uses
      // ComputeMaskedBits, and pick whichever answer is better.
    }
    break;

  case Instruction::Select:
    Tmp = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
    if (Tmp == 1) return 1;  // Early out.
    Tmp2 = ComputeNumSignBits(U->getOperand(2), TD, Depth+1);
    return std::min(Tmp, Tmp2);

  case Instruction::Add:
    // Add can have at most one carry bit. Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
    if (Tmp == 1) return 1;  // Early out.

    // Special case decrementing a value (ADD X, -1):
    if (ConstantInt *CRHS = dyn_cast<ConstantInt>(U->getOperand(1)))
      if (CRHS->isAllOnesValue()) {
        APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
        ComputeMaskedBits(U->getOperand(0), KnownZero, KnownOne, TD, Depth+1);

        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
          return TyBits;

        // If we are subtracting one from a positive number, there is no carry
        // out of the result.
        if (KnownZero.isNegative())
          return Tmp;
      }

    Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
    if (Tmp2 == 1) return 1;
    return std::min(Tmp, Tmp2)-1;

  case Instruction::Sub:
    Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
    if (Tmp2 == 1) return 1;

    // Handle NEG.
    if (ConstantInt *CLHS = dyn_cast<ConstantInt>(U->getOperand(0)))
      if (CLHS->isNullValue()) {
        APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
        ComputeMaskedBits(U->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
          return TyBits;

        // If the input is known to be positive (the sign bit is known clear),
        // the output of the NEG has the same number of sign bits as the input.
        if (KnownZero.isNegative())
          return Tmp2;

        // Otherwise, we treat this like a SUB.
      }

    // Sub can have at most one carry bit. Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
    if (Tmp == 1) return 1;  // Early out.
    return std::min(Tmp, Tmp2)-1;

  case Instruction::PHI: {
    PHINode *PN = cast<PHINode>(U);
    // Don't analyze large in-degree PHIs.
    if (PN->getNumIncomingValues() > 4) break;

    // Take the minimum of all incoming values. This can't infinitely loop
    // because of our depth threshold.
    Tmp = ComputeNumSignBits(PN->getIncomingValue(0), TD, Depth+1);
    for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) {
      if (Tmp == 1) return Tmp;
      Tmp = std::min(Tmp,
                     ComputeNumSignBits(PN->getIncomingValue(i), TD, Depth+1));
    }
    return Tmp;
  }

  case Instruction::Trunc:
    // FIXME: it's tricky to do anything useful for this, but it is an
    // important case for targets like X86.
    break;
  }

  // Finally, if we can prove that the top bits of the result are 0's or 1's,
  // use this information.
  APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
  APInt Mask;
  ComputeMaskedBits(V, KnownZero, KnownOne, TD, Depth);

  if (KnownZero.isNegative()) {        // sign bit is 0
    Mask = KnownZero;
  } else if (KnownOne.isNegative()) {  // sign bit is 1
    Mask = KnownOne;
  } else {
    // Nothing known.
    return FirstAnswer;
  }

  // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
  // the number of identical bits in the top of the input value.
  Mask = ~Mask;
  Mask <<= Mask.getBitWidth()-TyBits;
  // Return # leading zeros. We use 'min' here in case Val was zero before
  // shifting. We don't want to return '64' as for an i32 "0".
  return std::max(FirstAnswer, std::min(TyBits, Mask.countLeadingZeros()));
}
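
// Illustrative sketch, not part of the original file: a truncation-safety
// test built on ComputeNumSignBits. If an i64 value has more than 32 sign
// bits, truncating it to i32 and sign-extending back is lossless. The
// helper name and the i64 precondition are assumptions.
static bool exampleTruncToI32IsLossless(Value *V, const DataLayout *TD) {
  assert(V->getType()->isIntegerTy(64) && "hypothetical precondition");
  return ComputeNumSignBits(V, TD) > 32;
}
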
/// ComputeMultiple - This function computes the integer multiple of Base that
/// equals V. If successful, it returns true and returns the multiple in
/// Multiple. If unsuccessful, it returns false. It looks through SExt
/// instructions only if LookThroughSExt is true.
bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
                           bool LookThroughSExt, unsigned Depth) {
  const unsigned MaxDepth = 6;

  assert(V && "No Value?");
  assert(Depth <= MaxDepth && "Limit Search Depth");
  assert(V->getType()->isIntegerTy() && "Not integer type!");

  Type *T = V->getType();

  ConstantInt *CI = dyn_cast<ConstantInt>(V);

  if (Base == 0)
    return false;

  if (Base == 1) {
    Multiple = V;
    return true;
  }

  ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
  Constant *BaseVal = ConstantInt::get(T, Base);
  if (CO && CO == BaseVal) {
    // Multiple is 1.
    Multiple = ConstantInt::get(T, 1);
    return true;
  }

  if (CI && CI->getZExtValue() % Base == 0) {
    Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
    return true;
  }

  if (Depth == MaxDepth) return false;  // Limit search depth.

  Operator *I = dyn_cast<Operator>(V);
  if (!I) return false;

  switch (I->getOpcode()) {
  default: break;
  case Instruction::SExt:
    if (!LookThroughSExt) return false;
    // otherwise fall through to ZExt
  case Instruction::ZExt:
    return ComputeMultiple(I->getOperand(0), Base, Multiple,
                           LookThroughSExt, Depth+1);
  case Instruction::Shl:
  case Instruction::Mul: {
    Value *Op0 = I->getOperand(0);
    Value *Op1 = I->getOperand(1);

    if (I->getOpcode() == Instruction::Shl) {
      ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
      if (!Op1CI) return false;
      // Turn Op0 << Op1 into Op0 * 2^Op1
      APInt Op1Int = Op1CI->getValue();
      uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
      APInt API(Op1Int.getBitWidth(), 0);
      API.setBit(BitToSet);
      Op1 = ConstantInt::get(V->getContext(), API);
    }

    Value *Mul0 = NULL;
    if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
      if (Constant *Op1C = dyn_cast<Constant>(Op1))
        if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
          if (Op1C->getType()->getPrimitiveSizeInBits() <
              MulC->getType()->getPrimitiveSizeInBits())
            Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
          if (Op1C->getType()->getPrimitiveSizeInBits() >
              MulC->getType()->getPrimitiveSizeInBits())
            MulC = ConstantExpr::getZExt(MulC, Op1C->getType());

          // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
          Multiple = ConstantExpr::getMul(MulC, Op1C);
          return true;
        }

      if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
        if (Mul0CI->getValue() == 1) {
          // V == Base * Op1, so return Op1
          Multiple = Op1;
          return true;
        }
    }

    Value *Mul1 = NULL;
    if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
      if (Constant *Op0C = dyn_cast<Constant>(Op0))
        if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
          if (Op0C->getType()->getPrimitiveSizeInBits() <
              MulC->getType()->getPrimitiveSizeInBits())
            Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
          if (Op0C->getType()->getPrimitiveSizeInBits() >
              MulC->getType()->getPrimitiveSizeInBits())
            MulC = ConstantExpr::getZExt(MulC, Op0C->getType());

          // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
          Multiple = ConstantExpr::getMul(MulC, Op0C);
          return true;
        }

      if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
        if (Mul1CI->getValue() == 1) {
          // V == Base * Op0, so return Op0
          Multiple = Op0;
          return true;
        }
    }
  }
  }

  // We could not determine if V is a multiple of Base.
  return false;
}

/// CannotBeNegativeZero - Return true if we can prove that the specified FP
/// value is never equal to -0.0.
///
/// NOTE: this function will need to be revisited when we support non-default
/// rounding modes!
///
bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) {
  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
    return !CFP->getValueAPF().isNegZero();

  if (Depth == 6)
    return false;  // Limit search depth; we cannot prove anything further.

  const Operator *I = dyn_cast<Operator>(V);
  if (I == 0) return false;

  // (add x, 0.0) is guaranteed to return +0.0, not -0.0.
  if (I->getOpcode() == Instruction::FAdd &&
      isa<ConstantFP>(I->getOperand(1)) &&
      cast<ConstantFP>(I->getOperand(1))->isNullValue())
    return true;

  // sitofp and uitofp turn into +0.0 for zero.
  if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I))
    return true;

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
    // sqrt(-0.0) = -0.0, no other negative results are possible.
    if (II->getIntrinsicID() == Intrinsic::sqrt)
      return CannotBeNegativeZero(II->getArgOperand(0), Depth+1);

  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (const Function *F = CI->getCalledFunction()) {
      if (F->isDeclaration()) {
        // abs(x) != -0.0
        if (F->getName() == "abs") return true;
        // fabs[lf](x) != -0.0
        if (F->getName() == "fabs") return true;
        if (F->getName() == "fabsf") return true;
        if (F->getName() == "fabsl") return true;
        if (F->getName() == "sqrt" || F->getName() == "sqrtf" ||
            F->getName() == "sqrtl")
          return CannotBeNegativeZero(CI->getArgOperand(0), Depth+1);
      }
    }

  return false;
}
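
// Illustrative sketch, not part of the original file: the classic client of
// CannotBeNegativeZero is folding "fadd %x, 0.0" to %x, which is only valid
// when %x cannot itself be -0.0 (since -0.0 + 0.0 == +0.0). The helper name
// is hypothetical.
static bool exampleCanDropFAddZero(Value *X) {
  return CannotBeNegativeZero(X);
}
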
/// isBytewiseValue - If the specified value can be set by repeating the same
/// byte in memory, return the i8 value that it is represented with. This is
/// true for all i8 values obviously, but is also true for i32 0, i32 -1,
/// i16 0xF0F0, double 0.0 etc. If the value can't be handled with a repeated
/// byte store (e.g. i16 0x1234), return null.
Value *llvm::isBytewiseValue(Value *V) {
  // All byte-wide stores are splatable, even of arbitrary variables.
  if (V->getType()->isIntegerTy(8)) return V;

  // Handle 'null' ConstantArrayZero etc.
  if (Constant *C = dyn_cast<Constant>(V))
    if (C->isNullValue())
      return Constant::getNullValue(Type::getInt8Ty(V->getContext()));

  // Constant float and double values can be handled as integer values if the
  // corresponding integer value is "byteable". An important case is 0.0.
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
    if (CFP->getType()->isFloatTy())
      V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
    if (CFP->getType()->isDoubleTy())
      V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
    // Don't handle long double formats, which have strange constraints.
  }

  // We can handle constant integers that are a power of two in size and a
  // multiple of 8 bits.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    unsigned Width = CI->getBitWidth();
    if (isPowerOf2_32(Width) && Width > 8) {
      // We can handle this value if the recursive binary decomposition is the
      // same at all levels.
      APInt Val = CI->getValue();
      APInt Val2;
      while (Val.getBitWidth() != 8) {
        unsigned NextWidth = Val.getBitWidth()/2;
        Val2 = Val.lshr(NextWidth);
        Val2 = Val2.trunc(Val.getBitWidth()/2);
        Val = Val.trunc(Val.getBitWidth()/2);

        // If the top/bottom halves aren't the same, reject it.
        if (Val != Val2)
          return 0;
      }
      return ConstantInt::get(V->getContext(), Val);
    }
  }

  // A ConstantDataArray/Vector is splatable if all its members are equal and
  // also splatable.
  if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) {
    Value *Elt = CA->getElementAsConstant(0);
    Value *Val = isBytewiseValue(Elt);
    if (!Val)
      return 0;

    for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
      if (CA->getElementAsConstant(I) != Elt)
        return 0;

    return Val;
  }

  // Conceptually, we could handle things like:
  //   %a = zext i8 %X to i16
  //   %b = shl i16 %a, 8
  //   %c = or i16 %a, %b
  // but until there is an example that actually needs this, it doesn't seem
  // worth worrying about.
  return 0;
}
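
// Illustrative sketch, not part of the original file: memset-forming passes
// use isBytewiseValue to decide whether a store can become a byte splat.
// i32 -1 yields i8 -1 here, while i16 0x1234 yields null. The helper name
// is hypothetical.
static bool exampleStoreIsMemsettable(StoreInst *SI) {
  return isBytewiseValue(SI->getValueOperand()) != 0;
}
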

  // Find the value that is at that particular spot
  Value *V = FindInsertedValue(From, Idxs);

  if (!V)
    return 0;

  // Insert the value into the new (sub) aggregate
  return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
                                 "tmp", InsertBefore);
}

// This helper takes a nested struct and extracts a part of it (which is again a
// struct) into a new value. For example, given the struct:
//   { a, { b, { c, d }, e } }
// and the indices "1, 1" this returns
//   { c, d }.
//
// It does this by inserting an insertvalue for each element in the resulting
// struct, as opposed to just inserting a single struct. This will only work if
// each element of the substruct is known (i.e., was inserted into From by an
// insertvalue instruction somewhere).
//
// All inserted insertvalue instructions are inserted before InsertBefore.
static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
                                Instruction *InsertBefore) {
  assert(InsertBefore && "Must have someplace to insert!");
  Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
                                                       idx_range);
  Value *To = UndefValue::get(IndexedType);
  SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
  unsigned IdxSkip = Idxs.size();

  return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
}

/// FindInsertedValue - Given an aggregate and a sequence of indices, see if
/// the scalar value indexed is already around as a register, for example if it
/// was inserted directly into the aggregate.
///
/// If InsertBefore is not null, this function will duplicate (modified)
/// insertvalues when a part of a nested struct is extracted.
Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
                               Instruction *InsertBefore) {
  // Nothing to index? Just return V then (this is useful at the end of our
  // recursion).
  if (idx_range.empty())
    return V;
  // We have indices, so V should have an indexable type.
  assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
         "Not looking at a struct or array?");
  assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
         "Invalid indices for type?");

  if (Constant *C = dyn_cast<Constant>(V)) {
    C = C->getAggregateElement(idx_range[0]);
    if (C == 0) return 0;
    return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
  }

  if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
    // Walk the indices of the insertvalue instruction in parallel with the
    // requested indices.
    const unsigned *req_idx = idx_range.begin();
    for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
         i != e; ++i, ++req_idx) {
      if (req_idx == idx_range.end()) {
        // We can't handle this without inserting insertvalues.
        if (!InsertBefore)
          return 0;

        // The requested index identifies a part of a nested aggregate. Handle
        // this specially.
        // For example,
        //   %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
        //   %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
        //   %C = extractvalue {i32, { i32, i32 } } %B, 1
        // can be changed into
        //   %A = insertvalue {i32, i32 } undef, i32 10, 0
        //   %C = insertvalue {i32, i32 } %A, i32 11, 1
        // which allows the unused 0,0 element from the nested struct to be
        // removed.
        return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
                                 InsertBefore);
      }

      // This insertvalue inserts something other than what we are looking
      // for. See if the (aggregate) value it inserts into has the value we
      // are looking for, then.
      if (*req_idx != *i)
        return FindInsertedValue(I->getAggregateOperand(), idx_range,
                                 InsertBefore);
    }
    // If we end up here, the indices of the insertvalue match those requested
    // (though possibly only partially). Now we recursively look at the
    // inserted value, passing any remaining indices.
    return FindInsertedValue(I->getInsertedValueOperand(),
                             makeArrayRef(req_idx, idx_range.end()),
                             InsertBefore);
  }

  if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
    // If we're extracting a value from an aggregate that was itself extracted
    // from something else, we can extract from that something else directly
    // instead. However, we will need to chain I's indices with the requested
    // indices.

    // Calculate the number of indices required.
    unsigned size = I->getNumIndices() + idx_range.size();
    // Allocate some space to put the new indices in.
    SmallVector<unsigned, 5> Idxs;
    Idxs.reserve(size);
    // Add indices from the extractvalue instruction.
    Idxs.append(I->idx_begin(), I->idx_end());

    // Add requested indices.
    Idxs.append(idx_range.begin(), idx_range.end());

    assert(Idxs.size() == size
           && "Number of indices added not correct?");

    return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
  }
  // Otherwise, we don't know (e.g., we may be extracting from a function
  // return value or a load instruction).
  return 0;
}

/// GetPointerBaseWithConstantOffset - Analyze the specified pointer to see if
/// it can be expressed as a base pointer plus a constant offset. Return the
/// base and offset to the caller.
Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                              const DataLayout &TD) {
  Operator *PtrOp = dyn_cast<Operator>(Ptr);
  if (PtrOp == 0 || Ptr->getType()->isVectorTy())
    return Ptr;

  // Just look through bitcasts.
  if (PtrOp->getOpcode() == Instruction::BitCast)
    return GetPointerBaseWithConstantOffset(PtrOp->getOperand(0), Offset, TD);

  // If this is a GEP with constant indices, we can look through it.
  GEPOperator *GEP = dyn_cast<GEPOperator>(PtrOp);
  if (GEP == 0 || !GEP->hasAllConstantIndices()) return Ptr;

  gep_type_iterator GTI = gep_type_begin(GEP);
  for (User::op_iterator I = GEP->idx_begin(), E = GEP->idx_end(); I != E;
       ++I, ++GTI) {
    ConstantInt *OpC = cast<ConstantInt>(*I);
    if (OpC->isZero()) continue;

    // Handle struct and array indices, each of which adds its offset to the
    // pointer.
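    // For example (assuming a typical DataLayout with 32-bit i32 and natural
    // alignment), getelementptr {i32, i32}* %p, i64 1, i32 1 contributes
    // 1 * sizeof({i32, i32}) = 8 bytes for the first index plus the field-1
    // struct offset of 4, leaving Offset == 12.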
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
    } else {
      uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
      Offset += OpC->getSExtValue()*Size;
    }
  }

  // Re-sign extend from the pointer size if needed to get overflow edge cases
  // right.
  unsigned PtrSize = TD.getPointerSizeInBits();
  if (PtrSize < 64)
    Offset = SignExtend64(Offset, PtrSize);

  return GetPointerBaseWithConstantOffset(GEP->getPointerOperand(), Offset, TD);
}


/// getConstantStringInfo - This function extracts the bytes of the constant
/// string pointed to by V. If successful, it returns true and places the
/// string in Str. If unsuccessful, it returns false.
bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
                                 uint64_t Offset, bool TrimAtNul) {
  assert(V);

  // Look through bitcast instructions and geps.
  V = V->stripPointerCasts();

  // If the value is a GEP instruction or constant expression, treat it as an
  // offset.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    // Make sure the GEP has exactly three arguments.
    if (GEP->getNumOperands() != 3)
      return false;

    // Make sure the pointer being indexed points to an array of i8.
    PointerType *PT = cast<PointerType>(GEP->getOperand(0)->getType());
    ArrayType *AT = dyn_cast<ArrayType>(PT->getElementType());
    if (AT == 0 || !AT->getElementType()->isIntegerTy(8))
      return false;

    // Check to make sure that the first operand of the GEP is an integer and
    // has value 0 so that we are sure we're indexing into the initializer.
    const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
    if (FirstIdx == 0 || !FirstIdx->isZero())
      return false;

    // If the second index isn't a ConstantInt, then this is a variable index
    // into the array. If this occurs, we can't say anything meaningful about
    // the string.
    uint64_t StartIdx = 0;
    if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
      StartIdx = CI->getZExtValue();
    else
      return false;
    return getConstantStringInfo(GEP->getOperand(0), Str, StartIdx+Offset,
                                 TrimAtNul);
  }

  // Whether we looked through a GEP or not, V must now reference a global
  // variable that is a constant and has a definitive initializer. The
  // referenced constant initializer is the array that we'll use for the
  // optimization.
  const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
    return false;

  // Handle the all-zeros case.
  if (GV->getInitializer()->isNullValue()) {
    // This is a degenerate case. The initializer is constant zero, so the
    // length of the string must be zero.
    Str = "";
    return true;
  }

  // Must be a ConstantDataArray holding a string.
  const ConstantDataArray *Array =
    dyn_cast<ConstantDataArray>(GV->getInitializer());
  if (Array == 0 || !Array->isString())
    return false;

  // Get the number of elements in the array.
  uint64_t NumElts = Array->getType()->getArrayNumElements();

  // Start out with the entire array in the StringRef.
  Str = Array->getAsString();

  if (Offset > NumElts)
    return false;

  // Skip over 'Offset' bytes.
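  // For example, with initializer c"hello world\00" and Offset == 6, the
  // substr below yields "world\00", and the TrimAtNul block then reduces it
  // to "world".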
  Str = Str.substr(Offset);

  if (TrimAtNul) {
    // Trim off the \0 and anything after it. If the array is not nul
    // terminated, we return everything from Offset to the end of the array.
    // The client may know some other way that the string is length-bound.
    Str = Str.substr(0, Str.find('\0'));
  }
  return true;
}

// These next two are very similar to the above, but also look through PHI
// nodes.
// TODO: See if we can integrate these two together.

/// GetStringLengthH - If we can compute the length of the string pointed to by
/// the specified pointer, return 'len+1'. If we can't, return 0.
static uint64_t GetStringLengthH(Value *V, SmallPtrSet<PHINode*, 32> &PHIs) {
  // Look through noop bitcast instructions.
  V = V->stripPointerCasts();

  // If this is a PHI node, there are two cases: either we have already seen it
  // or we haven't.
  if (PHINode *PN = dyn_cast<PHINode>(V)) {
    if (!PHIs.insert(PN))
      return ~0ULL;  // Already in the set; avoid infinite recursion.

    // If it was new, see if all the input strings are the same length.
    uint64_t LenSoFar = ~0ULL;
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      uint64_t Len = GetStringLengthH(PN->getIncomingValue(i), PHIs);
      if (Len == 0) return 0; // Unknown length -> unknown.

      if (Len == ~0ULL) continue;

      if (Len != LenSoFar && LenSoFar != ~0ULL)
        return 0;    // Disagree -> unknown.
      LenSoFar = Len;
    }

    // Success, all agree.
    return LenSoFar;
  }

  // strlen(select(c,x,y)) is known only when strlen(x) and strlen(y) agree.
  if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
    uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs);
    if (Len1 == 0) return 0;
    uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs);
    if (Len2 == 0) return 0;
    if (Len1 == ~0ULL) return Len2;
    if (Len2 == ~0ULL) return Len1;
    if (Len1 != Len2) return 0;
    return Len1;
  }

  // Otherwise, see if we can read the string.
  StringRef StrData;
  if (!getConstantStringInfo(V, StrData))
    return 0;

  return StrData.size()+1;
}

/// GetStringLength - If we can compute the length of the string pointed to by
/// the specified pointer, return 'len+1'. If we can't, return 0.
uint64_t llvm::GetStringLength(Value *V) {
  if (!V->getType()->isPointerTy()) return 0;

  SmallPtrSet<PHINode*, 32> PHIs;
  uint64_t Len = GetStringLengthH(V, PHIs);
  // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so
  // report the length of an empty string (1, counting the nul terminator).
  return Len == ~0ULL ? 1 : Len;
}

Value *
llvm::GetUnderlyingObject(Value *V, const DataLayout *TD, unsigned MaxLookup) {
  if (!V->getType()->isPointerTy())
    return V;
  for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
      if (GA->mayBeOverridden())
        return V;
      V = GA->getAliasee();
    } else {
      // See if InstructionSimplify knows any relevant tricks.
      if (Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Acquire a DominatorTree and use it.
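        // (For instance, SimplifyInstruction can collapse a phi whose
        // incoming values are all identical into that value, letting the
        // stripping loop continue through it.)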
        if (Value *Simplified = SimplifyInstruction(I, TD, 0)) {
          V = Simplified;
          continue;
        }

      return V;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  }
  return V;
}

void
llvm::GetUnderlyingObjects(Value *V,
                           SmallVectorImpl<Value *> &Objects,
                           const DataLayout *TD,
                           unsigned MaxLookup) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist;
  Worklist.push_back(V);
  do {
    Value *P = Worklist.pop_back_val();
    P = GetUnderlyingObject(P, TD, MaxLookup);

    if (!Visited.insert(P))
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        Worklist.push_back(PN->getIncomingValue(i));
      continue;
    }

    Objects.push_back(P);
  } while (!Worklist.empty());
}

/// onlyUsedByLifetimeMarkers - Return true if the only users of this pointer
/// are lifetime markers.
///
bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
  for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
       UI != UE; ++UI) {
    const IntrinsicInst *II = dyn_cast<IntrinsicInst>(*UI);
    if (!II) return false;

    if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
        II->getIntrinsicID() != Intrinsic::lifetime_end)
      return false;
  }
  return true;
}

bool llvm::isSafeToSpeculativelyExecute(const Value *V,
                                        const DataLayout *TD) {
  const Operator *Inst = dyn_cast<Operator>(V);
  if (!Inst)
    return false;

  for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
    if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
      if (C->canTrap())
        return false;

  switch (Inst->getOpcode()) {
  default:
    return true;
  case Instruction::UDiv:
  case Instruction::URem:
    // x / y is undefined if y == 0, but calculations like x / 3 are safe.
    return isKnownNonZero(Inst->getOperand(1), TD);
  case Instruction::SDiv:
  case Instruction::SRem: {
    Value *Op = Inst->getOperand(1);
    // x / y is undefined if y == 0.
    if (!isKnownNonZero(Op, TD))
      return false;
    // x / y might be undefined if y == -1 (INT_MIN / -1 overflows).
    unsigned BitWidth = getBitWidth(Op->getType(), TD);
    if (BitWidth == 0)
      return false;
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(Op, KnownZero, KnownOne, TD);
    // If any bit of the divisor is known to be zero, it cannot be all-ones,
    // i.e. it cannot be -1.
    return !!KnownZero;
  }
  case Instruction::Load: {
    const LoadInst *LI = cast<LoadInst>(Inst);
    if (!LI->isUnordered())
      return false;
    return LI->getPointerOperand()->isDereferenceablePointer();
  }
  case Instruction::Call: {
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
      switch (II->getIntrinsicID()) {
      // These synthetic intrinsics have no side-effects, and just mark
      // information about their operands.
      // FIXME: There are other no-op synthetic instructions that potentially
      // should be considered at least *safe* to speculate...
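      // For example, executing a dbg_value early merely makes the debugger
      // see the variable's location sooner; it cannot fault, write memory,
      // or alter control flow.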
      case Intrinsic::dbg_declare:
      case Intrinsic::dbg_value:
        return true;

      case Intrinsic::bswap:
      case Intrinsic::ctlz:
      case Intrinsic::ctpop:
      case Intrinsic::cttz:
      case Intrinsic::objectsize:
      case Intrinsic::sadd_with_overflow:
      case Intrinsic::smul_with_overflow:
      case Intrinsic::ssub_with_overflow:
      case Intrinsic::uadd_with_overflow:
      case Intrinsic::umul_with_overflow:
      case Intrinsic::usub_with_overflow:
        return true;
      // TODO: some fp intrinsics are marked as having the same error handling
      // as libm. They're safe to speculate when they won't error.
      // TODO: are convert_{from,to}_fp16 safe?
      // TODO: can we list target-specific intrinsics here?
      default: break;
      }
    }
    return false; // The called function could have undefined behavior or
                  // side-effects, even if marked readnone nounwind.
  }
  case Instruction::VAArg:
  case Instruction::Alloca:
  case Instruction::Invoke:
  case Instruction::PHI:
  case Instruction::Store:
  case Instruction::Ret:
  case Instruction::Br:
  case Instruction::IndirectBr:
  case Instruction::Switch:
  case Instruction::Unreachable:
  case Instruction::Fence:
  case Instruction::LandingPad:
  case Instruction::AtomicRMW:
  case Instruction::AtomicCmpXchg:
  case Instruction::Resume:
    return false; // Misc instructions which have effects
  }
}
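
// A brief usage sketch (illustrative only; the helper name below is
// hypothetical). Passes such as LICM consult isSafeToSpeculativelyExecute
// before hoisting an instruction out of a conditionally executed block:
//
//   static bool canHoist(Instruction *I, const DataLayout *TD) {
//     // Safe only if executing I early cannot trap or have side effects,
//     // e.g. a udiv whose divisor ValueTracking proves non-zero.
//     return isSafeToSpeculativelyExecute(I, TD);
//   }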