//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/GlobalVariable.h"
#include "llvm/GlobalAlias.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Metadata.h"
#include "llvm/Operator.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/ADT/SmallPtrSet.h"
#include <cstring>
using namespace llvm;
using namespace llvm::PatternMatch;

const unsigned MaxDepth = 6;

/// getBitWidth - Returns the bitwidth of the given scalar or pointer type (if
/// unknown returns 0). For vector types, returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const TargetData *TD) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;
  assert(isa<PointerType>(Ty) && "Expected a pointer type!");
  return TD ? TD->getPointerSizeInBits() : 0;
}

static void ComputeMaskedBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
                                    APInt &KnownZero, APInt &KnownOne,
                                    APInt &KnownZero2, APInt &KnownOne2,
                                    const TargetData *TD, unsigned Depth) {
  if (!Add) {
    if (ConstantInt *CLHS = dyn_cast<ConstantInt>(Op0)) {
      // We know that the top bits of C-X are clear if X contains fewer bits
      // than C (i.e. no wrap-around can happen). For example, 20-X is
      // positive if we can prove that X is >= 0 and < 16.
      if (!CLHS->getValue().isNegative()) {
        unsigned BitWidth = KnownZero.getBitWidth();
        unsigned NLZ = (CLHS->getValue()+1).countLeadingZeros();
        // NLZ can't be BitWidth with no sign bit
        APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
        llvm::ComputeMaskedBits(Op1, KnownZero2, KnownOne2, TD, Depth+1);

        // If all of the MaskV bits are known to be zero, then we know the
        // output top bits are zero, because we now know that the output is
        // from [0-C].
        if ((KnownZero2 & MaskV) == MaskV) {
          unsigned NLZ2 = CLHS->getValue().countLeadingZeros();
          // Top bits known zero.
          KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
        }
      }
    }
  }

  unsigned BitWidth = KnownZero.getBitWidth();

  // If one of the operands has trailing zeros, then the bits that the
  // other operand has in those bit positions will be preserved in the
  // result. For an add, this works with either operand. For a subtract,
  // this only works if the known zeros are in the right operand.
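  // For example, if Op1 is known to end in four zero bits, no carry can
  // propagate into the low four bits of the sum, so those bits of Op0 + Op1
  // equal the low four bits of Op0.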
  APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
  llvm::ComputeMaskedBits(Op0, LHSKnownZero, LHSKnownOne, TD, Depth+1);
  assert((LHSKnownZero & LHSKnownOne) == 0 &&
         "Bits known to be one AND zero?");
  unsigned LHSKnownZeroOut = LHSKnownZero.countTrailingOnes();

  llvm::ComputeMaskedBits(Op1, KnownZero2, KnownOne2, TD, Depth+1);
  assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
  unsigned RHSKnownZeroOut = KnownZero2.countTrailingOnes();

  // Determine which operand has more trailing zeros, and use that
  // many bits from the other operand.
  if (LHSKnownZeroOut > RHSKnownZeroOut) {
    if (Add) {
      APInt Mask = APInt::getLowBitsSet(BitWidth, LHSKnownZeroOut);
      KnownZero |= KnownZero2 & Mask;
      KnownOne  |= KnownOne2 & Mask;
    } else {
      // If the known zeros are in the left operand for a subtract,
      // fall back to the minimum known zeros in both operands.
      KnownZero |= APInt::getLowBitsSet(BitWidth,
                                        std::min(LHSKnownZeroOut,
                                                 RHSKnownZeroOut));
    }
  } else if (RHSKnownZeroOut >= LHSKnownZeroOut) {
    APInt Mask = APInt::getLowBitsSet(BitWidth, RHSKnownZeroOut);
    KnownZero |= LHSKnownZero & Mask;
    KnownOne  |= LHSKnownOne & Mask;
  }

  // Are we still trying to solve for the sign bit?
  if (!KnownZero.isNegative() && !KnownOne.isNegative()) {
    if (NSW) {
      if (Add) {
        // Adding two positive numbers can't wrap into negative
        if (LHSKnownZero.isNegative() && KnownZero2.isNegative())
          KnownZero |= APInt::getSignBit(BitWidth);
        // and adding two negative numbers can't wrap into positive.
        else if (LHSKnownOne.isNegative() && KnownOne2.isNegative())
          KnownOne |= APInt::getSignBit(BitWidth);
      } else {
        // Subtracting a negative number from a positive one can't wrap
        if (LHSKnownZero.isNegative() && KnownOne2.isNegative())
          KnownZero |= APInt::getSignBit(BitWidth);
        // neither can subtracting a positive number from a negative one.
        else if (LHSKnownOne.isNegative() && KnownZero2.isNegative())
          KnownOne |= APInt::getSignBit(BitWidth);
      }
    }
  }
}

static void ComputeMaskedBitsMul(Value *Op0, Value *Op1, bool NSW,
                                 APInt &KnownZero, APInt &KnownOne,
                                 APInt &KnownZero2, APInt &KnownOne2,
                                 const TargetData *TD, unsigned Depth) {
  unsigned BitWidth = KnownZero.getBitWidth();
  ComputeMaskedBits(Op1, KnownZero, KnownOne, TD, Depth+1);
  ComputeMaskedBits(Op0, KnownZero2, KnownOne2, TD, Depth+1);
  assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
  assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = KnownZero.isNegative();
      bool isKnownNonNegativeOp0 = KnownZero2.isNegative();
      bool isKnownNegativeOp1 = KnownOne.isNegative();
      bool isKnownNegativeOp0 = KnownOne2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
        (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
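      // (It can be zero even with nsw, e.g. (-1) * 0 == 0, so we may only
      // conclude strict negativity when the non-negative operand is also
      // known non-zero; the isKnownNonZero calls below check exactly that.)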
      if (!isKnownNonNegative)
        isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
                           isKnownNonZero(Op0, TD, Depth)) ||
                          (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
                           isKnownNonZero(Op1, TD, Depth));
    }
  }

  // If low bits are zero in either operand, output low known-0 bits.
  // Also compute a conservative estimate for high known-0 bits.
  // More trickiness is possible, but this is sufficient for the
  // interesting case of alignment computation.
  KnownOne.clearAllBits();
  unsigned TrailZ = KnownZero.countTrailingOnes() +
                    KnownZero2.countTrailingOnes();
  unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
                            KnownZero2.countLeadingOnes(),
                            BitWidth) - BitWidth;

  TrailZ = std::min(TrailZ, BitWidth);
  LeadZ = std::min(LeadZ, BitWidth);
  KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
              APInt::getHighBitsSet(BitWidth, LeadZ);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !KnownOne.isNegative())
    KnownZero.setBit(BitWidth - 1);
  else if (isKnownNegative && !KnownZero.isNegative())
    KnownOne.setBit(BitWidth - 1);
}

void llvm::computeMaskedBitsLoad(const MDNode &Ranges, APInt &KnownZero) {
  unsigned BitWidth = KnownZero.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  // Use the high end of the ranges to find leading zeros.
  unsigned MinLeadingZeros = BitWidth;
  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower = cast<ConstantInt>(Ranges.getOperand(2*i + 0));
    ConstantInt *Upper = cast<ConstantInt>(Ranges.getOperand(2*i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());
    if (Range.isWrappedSet())
      MinLeadingZeros = 0; // -1 has no zeros
    unsigned LeadingZeros = (Upper->getValue() - 1).countLeadingZeros();
    MinLeadingZeros = std::min(LeadingZeros, MinLeadingZeros);
  }

  KnownZero = APInt::getHighBitsSet(BitWidth, MinLeadingZeros);
}

/// ComputeMaskedBits - Determine which of the bits are known to be either zero
/// or one and return them in the KnownZero/KnownOne bit sets.
///
/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
/// we cannot optimize based on the assumption that it is zero without changing
/// it to be an explicit zero. If we don't change it to zero, other code could
/// be optimized based on the contradictory assumption that it is non-zero.
/// Because instcombine aggressively folds operations with undef args anyway,
/// this won't lose us code quality.
///
/// This function is defined on values with integer type, values with pointer
/// type (but only if TD is non-null), and vectors of integers. In the case
/// where V is a vector, the known zero and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
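///
/// A brief usage sketch (illustrative only; the variable names here are
/// hypothetical): for X defined as "and i8 %y, 15", a caller would see the
/// top four bits reported as known zero:
///   APInt KnownZero(8, 0), KnownOne(8, 0);
///   ComputeMaskedBits(X, KnownZero, KnownOne, TD);
///   // KnownZero is now 0xF0: bits 4-7 of X are provably clear.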
void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
                             const TargetData *TD, unsigned Depth) {
  assert(V && "No Value?");
  assert(Depth <= MaxDepth && "Limit Search Depth");
  unsigned BitWidth = KnownZero.getBitWidth();

  assert((V->getType()->isIntOrIntVectorTy() ||
          V->getType()->getScalarType()->isPointerTy()) &&
         "Not integer or pointer type!");
  assert((!TD ||
          TD->getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
         (!V->getType()->isIntOrIntVectorTy() ||
          V->getType()->getScalarSizeInBits() == BitWidth) &&
         KnownZero.getBitWidth() == BitWidth &&
         KnownOne.getBitWidth() == BitWidth &&
         "V, KnownOne and KnownZero should have same BitWidth");

  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    // We know all of the bits for a constant!
    KnownOne = CI->getValue();
    KnownZero = ~KnownOne;
    return;
  }
  // Null and aggregate-zero are all-zeros.
  if (isa<ConstantPointerNull>(V) ||
      isa<ConstantAggregateZero>(V)) {
    KnownOne.clearAllBits();
    KnownZero = APInt::getAllOnesValue(BitWidth);
    return;
  }
  // Handle a constant vector by taking the intersection of the known bits of
  // each element. There is no real need to handle ConstantVector here, because
  // we don't handle undef in any particularly useful way.
  if (ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
    // We know that CDS must be a vector of integers. Take the intersection of
    // each element.
    KnownZero.setAllBits(); KnownOne.setAllBits();
    APInt Elt(KnownZero.getBitWidth(), 0);
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      Elt = CDS->getElementAsInteger(i);
      KnownZero &= ~Elt;
      KnownOne &= Elt;
    }
    return;
  }

  // The address of an aligned GlobalValue has trailing zeros.
  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    unsigned Align = GV->getAlignment();
    if (Align == 0 && TD) {
      if (GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV)) {
        Type *ObjectType = GVar->getType()->getElementType();
        if (ObjectType->isSized()) {
          // If the object is defined in the current Module, we'll be giving
          // it the preferred alignment. Otherwise, we have to assume that it
          // may only have the minimum ABI alignment.
          if (!GVar->isDeclaration() && !GVar->isWeakForLinker())
            Align = TD->getPreferredAlignment(GVar);
          else
            Align = TD->getABITypeAlignment(ObjectType);
        }
      }
    }
    if (Align > 0)
      KnownZero = APInt::getLowBitsSet(BitWidth,
                                       CountTrailingZeros_32(Align));
    else
      KnownZero.clearAllBits();
    KnownOne.clearAllBits();
    return;
  }
  // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
  // the bits of its aliasee.
  if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
    if (GA->mayBeOverridden()) {
      KnownZero.clearAllBits(); KnownOne.clearAllBits();
    } else {
      ComputeMaskedBits(GA->getAliasee(), KnownZero, KnownOne, TD, Depth+1);
    }
    return;
  }

  if (Argument *A = dyn_cast<Argument>(V)) {
    // Get alignment information off byval arguments if specified in the IR.
    if (A->hasByValAttr())
      if (unsigned Align = A->getParamAlignment())
        KnownZero = APInt::getLowBitsSet(BitWidth,
                                         CountTrailingZeros_32(Align));
    return;
  }

  // Start out not knowing anything.
  KnownZero.clearAllBits(); KnownOne.clearAllBits();

  if (Depth == MaxDepth)
    return;  // Limit search depth.
  Operator *I = dyn_cast<Operator>(V);
  if (!I) return;

  APInt KnownZero2(KnownZero), KnownOne2(KnownOne);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
      computeMaskedBitsLoad(*MD, KnownZero);
    return;
  case Instruction::And: {
    // If either the LHS or the RHS is zero, the result is zero.
    ComputeMaskedBits(I->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
    ComputeMaskedBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    KnownZero |= KnownZero2;
    return;
  }
  case Instruction::Or: {
    ComputeMaskedBits(I->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
    ComputeMaskedBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    KnownOne |= KnownOne2;
    return;
  }
  case Instruction::Xor: {
    ComputeMaskedBits(I->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
    ComputeMaskedBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
    KnownZero = KnownZeroOut;
    return;
  }
  case Instruction::Mul: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    ComputeMaskedBitsMul(I->getOperand(0), I->getOperand(1), NSW,
                         KnownZero, KnownOne, KnownZero2, KnownOne2, TD, Depth);
    break;
  }
  case Instruction::UDiv: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
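    // For example, if the denominator of a 32-bit udiv is known to have bit 4
    // set, it is at least 16, so the quotient has at least 4 more leading
    // zeros than the numerator.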
    ComputeMaskedBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
    unsigned LeadZ = KnownZero2.countLeadingOnes();

    KnownOne2.clearAllBits();
    KnownZero2.clearAllBits();
    ComputeMaskedBits(I->getOperand(1), KnownZero2, KnownOne2, TD, Depth+1);
    unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
    if (RHSUnknownLeadingOnes != BitWidth)
      LeadZ = std::min(BitWidth,
                       LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);

    KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
    return;
  }
  case Instruction::Select:
    ComputeMaskedBits(I->getOperand(2), KnownZero, KnownOne, TD, Depth+1);
    ComputeMaskedBits(I->getOperand(1), KnownZero2, KnownOne2, TD,
                      Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    return;
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    return; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // We can't handle these if we don't know the pointer size.
    if (!TD) return;
    // FALL THROUGH and handle them the same as zext/trunc.
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    if (SrcTy->isPointerTy())
      SrcBitWidth = TD->getTypeSizeInBits(SrcTy);
    else
      SrcBitWidth = SrcTy->getScalarSizeInBits();

    KnownZero = KnownZero.zextOrTrunc(SrcBitWidth);
    KnownOne = KnownOne.zextOrTrunc(SrcBitWidth);
    ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
    KnownZero = KnownZero.zextOrTrunc(BitWidth);
    KnownOne = KnownOne.zextOrTrunc(BitWidth);
    // Any top bits are known to be zero.
    if (BitWidth > SrcBitWidth)
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    return;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
      return;
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    KnownZero = KnownZero.trunc(SrcBitWidth);
    KnownOne = KnownOne.trunc(SrcBitWidth);
    ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
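    // E.g. for "sext i8 %x to i32": if the sign bit of %x is known zero, the
    // top 24 bits of the result are known zero; if it is known one, they are
    // known one.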
    if (KnownZero[SrcBitWidth-1])           // Input sign bit known zero
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    else if (KnownOne[SrcBitWidth-1])       // Input sign bit known set
      KnownOne |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    return;
  }
  case Instruction::Shl:
    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
      ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero <<= ShiftAmt;
      KnownOne  <<= ShiftAmt;
      KnownZero |= APInt::getLowBitsSet(BitWidth, ShiftAmt); // low bits known 0
      return;
    }
    break;
  case Instruction::LShr:
    // (ushr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      // Compute the new bits that are at the top now.
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);

      // Unsigned shift right.
      ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
      KnownOne  = APIntOps::lshr(KnownOne, ShiftAmt);
      // high bits known zero.
      KnownZero |= APInt::getHighBitsSet(BitWidth, ShiftAmt);
      return;
    }
    break;
  case Instruction::AShr:
    // (ashr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      // Compute the new bits that are at the top now.
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);

      // Signed shift right.
      ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
      KnownOne  = APIntOps::lshr(KnownOne, ShiftAmt);

      APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
      if (KnownZero[BitWidth-ShiftAmt-1])     // New bits are known zero.
        KnownZero |= HighBits;
      else if (KnownOne[BitWidth-ShiftAmt-1]) // New bits are known one.
        KnownOne |= HighBits;
      return;
    }
    break;
  case Instruction::Sub: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    ComputeMaskedBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                            KnownZero, KnownOne, KnownZero2, KnownOne2, TD,
                            Depth);
    break;
  }
  case Instruction::Add: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    ComputeMaskedBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                            KnownZero, KnownOne, KnownZero2, KnownOne2, TD,
                            Depth);
    break;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        ComputeMaskedBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);

        // The low bits of the first operand are unchanged by the srem.
        KnownZero = KnownZero2 & LowBits;
        KnownOne = KnownOne2 & LowBits;

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
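        // For example, (x srem 8) with x known non-negative lies in [0, 7],
        // so every bit above the low three is zero.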
        if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
          KnownZero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero, then
        // the upper bits are all one.
        if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
          KnownOne |= ~LowBits;

        assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      }
    }

    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero.
    if (KnownZero.isNonNegative()) {
      APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
      ComputeMaskedBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, TD,
                        Depth+1);
      // If it's known zero, our sign bit is also zero.
      if (LHSKnownZero.isNegative())
        KnownZero.setBit(BitWidth - 1);
    }

    break;
  case Instruction::URem: {
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD,
                          Depth+1);
        assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
        KnownZero |= ~LowBits;
        KnownOne &= LowBits;
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
    ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
    ComputeMaskedBits(I->getOperand(1), KnownZero2, KnownOne2, TD, Depth+1);

    unsigned Leaders = std::max(KnownZero.countLeadingOnes(),
                                KnownZero2.countLeadingOnes());
    KnownOne.clearAllBits();
    KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
    break;
  }

  case Instruction::Alloca: {
    AllocaInst *AI = cast<AllocaInst>(V);
    unsigned Align = AI->getAlignment();
    if (Align == 0 && TD)
      Align = TD->getABITypeAlignment(AI->getType()->getElementType());

    if (Align > 0)
      KnownZero = APInt::getLowBitsSet(BitWidth, CountTrailingZeros_32(Align));
    break;
  }
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    APInt LocalKnownZero(BitWidth, 0), LocalKnownOne(BitWidth, 0);
    ComputeMaskedBits(I->getOperand(0), LocalKnownZero, LocalKnownOne, TD,
                      Depth+1);
    unsigned TrailZ = LocalKnownZero.countTrailingOnes();

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      Value *Index = I->getOperand(i);
      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
        // Handle struct member offset arithmetic.
        if (!TD) return;
        const StructLayout *SL = TD->getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        uint64_t Offset = SL->getElementOffset(Idx);
        TrailZ = std::min(TrailZ,
                          CountTrailingZeros_64(Offset));
      } else {
        // Handle array index arithmetic.
        Type *IndexedTy = GTI.getIndexedType();
        if (!IndexedTy->isSized()) return;
        unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
        uint64_t TypeSize = TD ?
                            TD->getTypeAllocSize(IndexedTy) : 1;
        LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
        ComputeMaskedBits(Index, LocalKnownZero, LocalKnownOne, TD, Depth+1);
        TrailZ = std::min(TrailZ,
                          unsigned(CountTrailingZeros_64(TypeSize) +
                                   LocalKnownZero.countTrailingOnes()));
      }
    }

    KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ);
    break;
  }
  case Instruction::PHI: {
    PHINode *P = cast<PHINode>(I);
    // Handle the case of a simple two-predecessor recurrence PHI.
    // There's a lot more that could theoretically be done here, but
    // this is sufficient to catch some interesting cases.
    if (P->getNumIncomingValues() == 2) {
      for (unsigned i = 0; i != 2; ++i) {
        Value *L = P->getIncomingValue(i);
        Value *R = P->getIncomingValue(!i);
        Operator *LU = dyn_cast<Operator>(L);
        if (!LU)
          continue;
        unsigned Opcode = LU->getOpcode();
        // Check for operations that have the property that if
        // both their operands have low zero bits, the result
        // will have low zero bits.
        if (Opcode == Instruction::Add ||
            Opcode == Instruction::Sub ||
            Opcode == Instruction::And ||
            Opcode == Instruction::Or ||
            Opcode == Instruction::Mul) {
          Value *LL = LU->getOperand(0);
          Value *LR = LU->getOperand(1);
          // Find a recurrence.
          if (LL == I)
            L = LR;
          else if (LR == I)
            L = LL;
          else
            break;
          // Ok, we have a PHI of the form L op= R. Check for low
          // zero bits.
          ComputeMaskedBits(R, KnownZero2, KnownOne2, TD, Depth+1);

          // We need to take the minimum number of known bits.
          APInt KnownZero3(KnownZero), KnownOne3(KnownOne);
          ComputeMaskedBits(L, KnownZero3, KnownOne3, TD, Depth+1);

          KnownZero = APInt::getLowBitsSet(BitWidth,
                                      std::min(KnownZero2.countTrailingOnes(),
                                               KnownZero3.countTrailingOnes()));
          break;
        }
      }
    }

    // Unreachable blocks may have zero-operand PHI nodes.
    if (P->getNumIncomingValues() == 0)
      return;

    // Otherwise take the unions of the known bit sets of the operands,
    // taking conservative care to avoid excessive recursion.
    if (Depth < MaxDepth - 1 && !KnownZero && !KnownOne) {
      // Skip if every incoming value refers back to the PHI itself.
      if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
        break;

      KnownZero = APInt::getAllOnesValue(BitWidth);
      KnownOne = APInt::getAllOnesValue(BitWidth);
      for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i) {
        // Skip direct self references.
        if (P->getIncomingValue(i) == P) continue;

        KnownZero2 = APInt(BitWidth, 0);
        KnownOne2 = APInt(BitWidth, 0);
        // Recurse, but cap the recursion to one level, because we don't
        // want to waste time spinning around in loops.
        ComputeMaskedBits(P->getIncomingValue(i), KnownZero2, KnownOne2, TD,
                          MaxDepth-1);
        KnownZero &= KnownZero2;
        KnownOne &= KnownOne2;
        // If all bits have been ruled out, there's no need to check
        // more operands.
        if (!KnownZero && !KnownOne)
          break;
      }
    }
    break;
  }
  case Instruction::Call:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::ctlz:
      case Intrinsic::cttz: {
        unsigned LowBits = Log2_32(BitWidth)+1;
        // If this call is undefined for 0, the result will be less than 2^n.
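        // E.g. on i32 the count is at most 32 and needs 6 bits to represent;
        // if a zero input is undefined, the count is at most 31 and fits in
        // 5 bits.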
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          LowBits -= 1;
        KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
        break;
      }
      case Intrinsic::ctpop: {
        unsigned LowBits = Log2_32(BitWidth)+1;
        KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
        break;
      }
      case Intrinsic::x86_sse42_crc32_64_8:
      case Intrinsic::x86_sse42_crc32_64_64:
        KnownZero = APInt::getHighBitsSet(64, 32);
        break;
      }
    }
    break;
  case Instruction::ExtractValue:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
      ExtractValueInst *EVI = cast<ExtractValueInst>(I);
      if (EVI->getNumIndices() != 1) break;
      if (EVI->getIndices()[0] == 0) {
        switch (II->getIntrinsicID()) {
        default: break;
        case Intrinsic::uadd_with_overflow:
        case Intrinsic::sadd_with_overflow:
          ComputeMaskedBitsAddSub(true, II->getArgOperand(0),
                                  II->getArgOperand(1), false, KnownZero,
                                  KnownOne, KnownZero2, KnownOne2, TD, Depth);
          break;
        case Intrinsic::usub_with_overflow:
        case Intrinsic::ssub_with_overflow:
          ComputeMaskedBitsAddSub(false, II->getArgOperand(0),
                                  II->getArgOperand(1), false, KnownZero,
                                  KnownOne, KnownZero2, KnownOne2, TD, Depth);
          break;
        case Intrinsic::umul_with_overflow:
        case Intrinsic::smul_with_overflow:
          ComputeMaskedBitsMul(II->getArgOperand(0), II->getArgOperand(1),
                               false, KnownZero, KnownOne,
                               KnownZero2, KnownOne2, TD, Depth);
          break;
        }
      }
    }
  }
}

/// ComputeSignBit - Determine whether the sign bit is known to be zero or
/// one. Convenience wrapper around ComputeMaskedBits.
void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
                          const TargetData *TD, unsigned Depth) {
  unsigned BitWidth = getBitWidth(V->getType(), TD);
  if (!BitWidth) {
    KnownZero = false;
    KnownOne = false;
    return;
  }
  APInt ZeroBits(BitWidth, 0);
  APInt OneBits(BitWidth, 0);
  ComputeMaskedBits(V, ZeroBits, OneBits, TD, Depth);
  KnownOne = OneBits[BitWidth - 1];
  KnownZero = ZeroBits[BitWidth - 1];
}

/// isPowerOfTwo - Return true if the given value is known to have exactly one
/// bit set when defined. For vectors return true if every element is known to
/// be a power of two when defined. Supports values with integer or pointer
/// types and vectors of integers.
bool llvm::isPowerOfTwo(Value *V, const TargetData *TD, bool OrZero,
                        unsigned Depth) {
  if (Constant *C = dyn_cast<Constant>(V)) {
    if (C->isNullValue())
      return OrZero;
    if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
      return CI->getValue().isPowerOf2();
    // TODO: Handle vector constants.
  }

  // 1 << X is clearly a power of two if the one is not shifted off the end. If
  // it is shifted off the end then the result is undefined.
  if (match(V, m_Shl(m_One(), m_Value())))
    return true;

  // (signbit) >>l X is clearly a power of two if the one is not shifted off
  // the bottom. If it is shifted off the bottom then the result is undefined.
  if (match(V, m_LShr(m_SignBit(), m_Value())))
    return true;

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxDepth)
    return false;

  Value *X = 0, *Y = 0;
  // A shift of a power of two is a power of two or zero.
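  // ("Or zero" because the entire power of two can be shifted out, e.g.
  // (i8 16) << 5 == 0.)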
  if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
                 match(V, m_Shr(m_Value(X), m_Value()))))
    return isPowerOfTwo(X, TD, /*OrZero*/true, Depth);

  if (ZExtInst *ZI = dyn_cast<ZExtInst>(V))
    return isPowerOfTwo(ZI->getOperand(0), TD, OrZero, Depth);

  if (SelectInst *SI = dyn_cast<SelectInst>(V))
    return isPowerOfTwo(SI->getTrueValue(), TD, OrZero, Depth) &&
           isPowerOfTwo(SI->getFalseValue(), TD, OrZero, Depth);

  if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
    // A power of two and'd with anything is a power of two or zero.
    if (isPowerOfTwo(X, TD, /*OrZero*/true, Depth) ||
        isPowerOfTwo(Y, TD, /*OrZero*/true, Depth))
      return true;
    // X & (-X) is always a power of two or zero.
    if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
      return true;
    return false;
  }

  // An exact divide or right shift can only shift off zero bits, so the result
  // is a power of two only if the first operand is a power of two and not
  // copying a sign bit (sdiv int_min, 2).
  if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
      match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
    return isPowerOfTwo(cast<Operator>(V)->getOperand(0), TD, OrZero, Depth);
  }

  return false;
}

/// isKnownNonZero - Return true if the given value is known to be non-zero
/// when defined. For vectors return true if every element is known to be
/// non-zero when defined. Supports values with integer or pointer type and
/// vectors of integers.
bool llvm::isKnownNonZero(Value *V, const TargetData *TD, unsigned Depth) {
  if (Constant *C = dyn_cast<Constant>(V)) {
    if (C->isNullValue())
      return false;
    if (isa<ConstantInt>(C))
      // Must be non-zero due to null test above.
      return true;
    // TODO: Handle vectors
    return false;
  }

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ >= MaxDepth)
    return false;

  unsigned BitWidth = getBitWidth(V->getType(), TD);

  // X | Y != 0 if X != 0 or Y != 0.
  Value *X = 0, *Y = 0;
  if (match(V, m_Or(m_Value(X), m_Value(Y))))
    return isKnownNonZero(X, TD, Depth) || isKnownNonZero(Y, TD, Depth);

  // ext X != 0 if X != 0.
  if (isa<SExtInst>(V) || isa<ZExtInst>(V))
    return isKnownNonZero(cast<Instruction>(V)->getOperand(0), TD, Depth);

  // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
  // if the lowest bit is shifted off the end.
  if (BitWidth && match(V, m_Shl(m_Value(X), m_Value(Y)))) {
    // shl nuw can't remove any non-zero bits.
    OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
    if (BO->hasNoUnsignedWrap())
      return isKnownNonZero(X, TD, Depth);

    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(X, KnownZero, KnownOne, TD, Depth);
    if (KnownOne[0])
      return true;
  }
  // shr X, Y != 0 if X is negative. Note that the value of the shift is not
  // defined if the sign bit is shifted off the end.
  else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
    // shr exact can only shift out zero bits.
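    // (So a non-zero X keeps at least one set bit and the result stays
    // non-zero; the exact flag is what makes this sound.)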
    PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
    if (BO->isExact())
      return isKnownNonZero(X, TD, Depth);

    bool XKnownNonNegative, XKnownNegative;
    ComputeSignBit(X, XKnownNonNegative, XKnownNegative, TD, Depth);
    if (XKnownNegative)
      return true;
  }
  // div exact can only produce a zero if the dividend is zero.
  else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
    return isKnownNonZero(X, TD, Depth);
  }
  // X + Y.
  else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
    bool XKnownNonNegative, XKnownNegative;
    bool YKnownNonNegative, YKnownNegative;
    ComputeSignBit(X, XKnownNonNegative, XKnownNegative, TD, Depth);
    ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, TD, Depth);

    // If X and Y are both non-negative (as signed values) then their sum is
    // not zero unless both X and Y are zero.
    if (XKnownNonNegative && YKnownNonNegative)
      if (isKnownNonZero(X, TD, Depth) || isKnownNonZero(Y, TD, Depth))
        return true;

    // If X and Y are both negative (as signed values) then their sum is not
    // zero unless both X and Y equal INT_MIN.
    if (BitWidth && XKnownNegative && YKnownNegative) {
      APInt KnownZero(BitWidth, 0);
      APInt KnownOne(BitWidth, 0);
      APInt Mask = APInt::getSignedMaxValue(BitWidth);
      // The sign bit of X is set. If some other bit is set then X is not
      // equal to INT_MIN.
      ComputeMaskedBits(X, KnownZero, KnownOne, TD, Depth);
      if ((KnownOne & Mask) != 0)
        return true;
      // The sign bit of Y is set. If some other bit is set then Y is not
      // equal to INT_MIN.
      ComputeMaskedBits(Y, KnownZero, KnownOne, TD, Depth);
      if ((KnownOne & Mask) != 0)
        return true;
    }

    // The sum of a non-negative number and a power of two is not zero.
    if (XKnownNonNegative && isPowerOfTwo(Y, TD, /*OrZero*/false, Depth))
      return true;
    if (YKnownNonNegative && isPowerOfTwo(X, TD, /*OrZero*/false, Depth))
      return true;
  }
  // X * Y.
  else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
    OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
    // If X and Y are non-zero then so is X * Y as long as the multiplication
    // does not overflow.
    if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) &&
        isKnownNonZero(X, TD, Depth) && isKnownNonZero(Y, TD, Depth))
      return true;
  }
  // (C ? X : Y) != 0 if X != 0 and Y != 0.
  else if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
    if (isKnownNonZero(SI->getTrueValue(), TD, Depth) &&
        isKnownNonZero(SI->getFalseValue(), TD, Depth))
      return true;
  }

  if (!BitWidth) return false;
  APInt KnownZero(BitWidth, 0);
  APInt KnownOne(BitWidth, 0);
  ComputeMaskedBits(V, KnownZero, KnownOne, TD, Depth);
  return KnownOne != 0;
}

/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
/// this predicate to simplify operations downstream. Mask is known to be zero
/// for bits that V cannot have.
///
/// This function is defined on values with integer type, values with pointer
/// type (but only if TD is non-null), and vectors of integers. In the case
/// where V is a vector, the mask, known zero, and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
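///
/// Illustrative use (a hypothetical caller, not from the original docs): to
/// test whether a value P is 8-byte aligned, check its low three bits:
///   if (MaskedValueIsZero(P, APInt(BitWidth, 7), TD))
///     ...; // P is known to be a multiple of 8.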
bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask,
                             const TargetData *TD, unsigned Depth) {
  APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0);
  ComputeMaskedBits(V, KnownZero, KnownOne, TD, Depth);
  assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
  return (KnownZero & Mask) == Mask;
}


/// ComputeNumSignBits - Return the number of times the sign bit of the
/// register is replicated into the other bits. We know that at least 1 bit
/// is always equal to the sign bit (itself), but other cases can give us
/// information. For example, immediately after an "ashr X, 2", we know that
/// the top 3 bits are all equal to each other, so we return 3.
///
/// 'V' must have a scalar integer type.
///
unsigned llvm::ComputeNumSignBits(Value *V, const TargetData *TD,
                                  unsigned Depth) {
  assert((TD || V->getType()->isIntOrIntVectorTy()) &&
         "ComputeNumSignBits requires a TargetData object to operate "
         "on non-integer values!");
  Type *Ty = V->getType();
  unsigned TyBits = TD ? TD->getTypeSizeInBits(V->getType()->getScalarType()) :
                    Ty->getScalarSizeInBits();
  unsigned Tmp, Tmp2;
  unsigned FirstAnswer = 1;

  // Note that ConstantInt is handled by the general ComputeMaskedBits case
  // below.

  if (Depth == 6)
    return 1;  // Limit search depth.

  Operator *U = dyn_cast<Operator>(V);
  switch (Operator::getOpcode(V)) {
  default: break;
  case Instruction::SExt:
    Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
    return ComputeNumSignBits(U->getOperand(0), TD, Depth+1) + Tmp;

  case Instruction::AShr: {
    Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
    // ashr X, C  ->  adds C sign bits. Vectors too.
    const APInt *ShAmt;
    if (match(U->getOperand(1), m_APInt(ShAmt))) {
      Tmp += ShAmt->getZExtValue();
      if (Tmp > TyBits) Tmp = TyBits;
    }
    return Tmp;
  }
  case Instruction::Shl: {
    const APInt *ShAmt;
    if (match(U->getOperand(1), m_APInt(ShAmt))) {
      // shl destroys sign bits.
      Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
      Tmp2 = ShAmt->getZExtValue();
      if (Tmp2 >= TyBits ||      // Bad shift.
          Tmp2 >= Tmp) break;    // Shifted all sign bits out.
      return Tmp - Tmp2;
    }
    break;
  }
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:    // NOT is handled here.
    // Logical binary ops preserve the number of sign bits at the worst.
    Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
    if (Tmp != 1) {
      Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
      FirstAnswer = std::min(Tmp, Tmp2);
      // We computed what we know about the sign bits as our first
      // answer. Now proceed to the generic code that uses
      // ComputeMaskedBits, and pick whichever answer is better.
    }
    break;

  case Instruction::Select:
    Tmp = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
    if (Tmp == 1) return 1;  // Early out.
    Tmp2 = ComputeNumSignBits(U->getOperand(2), TD, Depth+1);
    return std::min(Tmp, Tmp2);

  case Instruction::Add:
    // Add can have at most one carry bit. Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
    if (Tmp == 1) return 1;  // Early out.
    // Special case decrementing a value (ADD X, -1):
    if (ConstantInt *CRHS = dyn_cast<ConstantInt>(U->getOperand(1)))
      if (CRHS->isAllOnesValue()) {
        APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
        ComputeMaskedBits(U->getOperand(0), KnownZero, KnownOne, TD, Depth+1);

        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
          return TyBits;

        // If we are subtracting one from a positive number, there is no carry
        // out of the result.
        if (KnownZero.isNegative())
          return Tmp;
      }

    Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
    if (Tmp2 == 1) return 1;
    return std::min(Tmp, Tmp2)-1;

  case Instruction::Sub:
    Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
    if (Tmp2 == 1) return 1;

    // Handle NEG.
    if (ConstantInt *CLHS = dyn_cast<ConstantInt>(U->getOperand(0)))
      if (CLHS->isNullValue()) {
        APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
        ComputeMaskedBits(U->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
          return TyBits;

        // If the input is known to be positive (the sign bit is known clear),
        // the output of the NEG has the same number of sign bits as the input.
        if (KnownZero.isNegative())
          return Tmp2;

        // Otherwise, we treat this like a SUB.
      }

    // Sub can have at most one carry bit. Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
    if (Tmp == 1) return 1;  // Early out.
    return std::min(Tmp, Tmp2)-1;

  case Instruction::PHI: {
    PHINode *PN = cast<PHINode>(U);
    // Don't analyze large in-degree PHIs.
    if (PN->getNumIncomingValues() > 4) break;

    // Take the minimum of all incoming values. This can't infinitely loop
    // because of our depth threshold.
    Tmp = ComputeNumSignBits(PN->getIncomingValue(0), TD, Depth+1);
    for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) {
      if (Tmp == 1) return Tmp;
      Tmp = std::min(Tmp,
                     ComputeNumSignBits(PN->getIncomingValue(i), TD, Depth+1));
    }
    return Tmp;
  }

  case Instruction::Trunc:
    // FIXME: it's tricky to do anything useful for this, but it is an
    // important case for targets like X86.
    break;
  }

  // Finally, if we can prove that the top bits of the result are 0's or 1's,
  // use this information.
  APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
  APInt Mask;
  ComputeMaskedBits(V, KnownZero, KnownOne, TD, Depth);

  if (KnownZero.isNegative()) {        // sign bit is 0
    Mask = KnownZero;
  } else if (KnownOne.isNegative()) {  // sign bit is 1
    Mask = KnownOne;
  } else {
    // Nothing known.
    return FirstAnswer;
  }

  // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
  // the number of identical bits in the top of the input value.
  Mask = ~Mask;
  Mask <<= Mask.getBitWidth()-TyBits;
  // Return # leading zeros. We use 'min' here in case Val was zero before
  // shifting. We don't want to return '64' as for an i32 "0".
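  // E.g. if the top 8 bits of an i32 are all known zero, then after the
  // inversion above Mask has 8 leading zeros, so we report 8 sign bits.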
  return std::max(FirstAnswer, std::min(TyBits, Mask.countLeadingZeros()));
}

/// ComputeMultiple - This function computes the integer multiple of Base that
/// equals V. If successful, it returns true and returns the multiple in
/// Multiple. If unsuccessful, it returns false. It looks
/// through SExt instructions only if LookThroughSExt is true.
bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
                           bool LookThroughSExt, unsigned Depth) {
  const unsigned MaxDepth = 6;

  assert(V && "No Value?");
  assert(Depth <= MaxDepth && "Limit Search Depth");
  assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");

  Type *T = V->getType();

  ConstantInt *CI = dyn_cast<ConstantInt>(V);

  if (Base == 0)
    return false;

  if (Base == 1) {
    Multiple = V;
    return true;
  }

  ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
  Constant *BaseVal = ConstantInt::get(T, Base);
  if (CO && CO == BaseVal) {
    // Multiple is 1.
    Multiple = ConstantInt::get(T, 1);
    return true;
  }

  if (CI && CI->getZExtValue() % Base == 0) {
    Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
    return true;
  }

  if (Depth == MaxDepth) return false;  // Limit search depth.

  Operator *I = dyn_cast<Operator>(V);
  if (!I) return false;

  switch (I->getOpcode()) {
  default: break;
  case Instruction::SExt:
    if (!LookThroughSExt) return false;
    // otherwise fall through to ZExt
  case Instruction::ZExt:
    return ComputeMultiple(I->getOperand(0), Base, Multiple,
                           LookThroughSExt, Depth+1);
  case Instruction::Shl:
  case Instruction::Mul: {
    Value *Op0 = I->getOperand(0);
    Value *Op1 = I->getOperand(1);

    if (I->getOpcode() == Instruction::Shl) {
      ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
      if (!Op1CI) return false;
      // Turn Op0 << Op1 into Op0 * 2^Op1
      APInt Op1Int = Op1CI->getValue();
      uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
      APInt API(Op1Int.getBitWidth(), 0);
      API.setBit(BitToSet);
      Op1 = ConstantInt::get(V->getContext(), API);
    }

    Value *Mul0 = NULL;
    if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
      if (Constant *Op1C = dyn_cast<Constant>(Op1))
        if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
          if (Op1C->getType()->getPrimitiveSizeInBits() <
              MulC->getType()->getPrimitiveSizeInBits())
            Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
          if (Op1C->getType()->getPrimitiveSizeInBits() >
              MulC->getType()->getPrimitiveSizeInBits())
            MulC = ConstantExpr::getZExt(MulC, Op1C->getType());

          // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
          Multiple = ConstantExpr::getMul(MulC, Op1C);
          return true;
        }

      if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
        if (Mul0CI->getValue() == 1) {
          // V == Base * Op1, so return Op1
          Multiple = Op1;
          return true;
        }
    }

    Value *Mul1 = NULL;
    if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
      if (Constant *Op0C = dyn_cast<Constant>(Op0))
        if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
          if (Op0C->getType()->getPrimitiveSizeInBits() <
              MulC->getType()->getPrimitiveSizeInBits())
            Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
          if (Op0C->getType()->getPrimitiveSizeInBits() >
              MulC->getType()->getPrimitiveSizeInBits())
            MulC = ConstantExpr::getZExt(MulC, Op0C->getType());

          // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
          Multiple = ConstantExpr::getMul(MulC, Op0C);
          return true;
        }

      if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
        if (Mul1CI->getValue() == 1) {
          // V == Base * Op0, so return Op0
          Multiple = Op0;
          return true;
        }
    }
  }
  }

  // We could not determine if V is a multiple of Base.
  return false;
}

/// CannotBeNegativeZero - Return true if we can prove that the specified FP
/// value is never equal to -0.0.
///
/// NOTE: this function will need to be revisited when we support non-default
/// rounding modes!
///
bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) {
  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
    return !CFP->getValueAPF().isNegZero();

  if (Depth == 6)
    return false;  // Limit search depth; we cannot prove anything here.

  const Operator *I = dyn_cast<Operator>(V);
  if (I == 0) return false;

  // (add x, 0.0) is guaranteed to return +0.0, not -0.0.
  if (I->getOpcode() == Instruction::FAdd &&
      isa<ConstantFP>(I->getOperand(1)) &&
      cast<ConstantFP>(I->getOperand(1))->isNullValue())
    return true;

  // sitofp and uitofp turn into +0.0 for zero.
  if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I))
    return true;

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
    // sqrt(-0.0) = -0.0, no other negative results are possible.
    if (II->getIntrinsicID() == Intrinsic::sqrt)
      return CannotBeNegativeZero(II->getArgOperand(0), Depth+1);

  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (const Function *F = CI->getCalledFunction()) {
      if (F->isDeclaration()) {
        // abs(x) != -0.0
        if (F->getName() == "abs") return true;
        // fabs[lf](x) != -0.0
        if (F->getName() == "fabs") return true;
        if (F->getName() == "fabsf") return true;
        if (F->getName() == "fabsl") return true;
        if (F->getName() == "sqrt" || F->getName() == "sqrtf" ||
            F->getName() == "sqrtl")
          return CannotBeNegativeZero(CI->getArgOperand(0), Depth+1);
      }
    }

  return false;
}

/// isBytewiseValue - If the specified value can be set by repeating the same
/// byte in memory, return the i8 value that it is represented with. This is
/// true for all i8 values obviously, but is also true for i32 0, i32 -1,
/// i16 0xF0F0, double 0.0 etc. If the value can't be handled with a repeated
/// byte store (e.g. i16 0x1234), return null.
Value *llvm::isBytewiseValue(Value *V) {
  // All byte-wide stores are splatable, even of arbitrary variables.
  if (V->getType()->isIntegerTy(8)) return V;

  // Handle 'null' ConstantArrayZero etc.
  if (Constant *C = dyn_cast<Constant>(V))
    if (C->isNullValue())
      return Constant::getNullValue(Type::getInt8Ty(V->getContext()));

  // Constant float and double values can be handled as integer values if the
  // corresponding integer value is "byteable". An important case is 0.0.
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
    if (CFP->getType()->isFloatTy())
      V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
    if (CFP->getType()->isDoubleTy())
      V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
    // Don't handle long double formats, which have strange constraints.
  }

  // We can handle constant integers that are a power of two in size and a
  // multiple of 8 bits.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    unsigned Width = CI->getBitWidth();
    if (isPowerOf2_32(Width) && Width > 8) {
      // We can handle this value if the recursive binary decomposition is the
      // same at all levels.
      APInt Val = CI->getValue();
      APInt Val2;
      while (Val.getBitWidth() != 8) {
        unsigned NextWidth = Val.getBitWidth()/2;
        Val2 = Val.lshr(NextWidth);
        Val2 = Val2.trunc(Val.getBitWidth()/2);
        Val = Val.trunc(Val.getBitWidth()/2);

        // If the top/bottom halves aren't the same, reject it.
        if (Val != Val2)
          return 0;
      }
      return ConstantInt::get(V->getContext(), Val);
    }
  }

  // A ConstantDataArray/Vector is splatable if all its members are equal and
  // also splatable.
  if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) {
    Value *Elt = CA->getElementAsConstant(0);
    Value *Val = isBytewiseValue(Elt);
    if (!Val)
      return 0;

    for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
      if (CA->getElementAsConstant(I) != Elt)
        return 0;

    return Val;
  }

  // Conceptually, we could handle things like:
  //   %a = zext i8 %X to i16
  //   %b = shl i16 %a, 8
  //   %c = or i16 %a, %b
  // but until there is an example that actually needs this, it doesn't seem
  // worth worrying about.
  return 0;
}


// This is the recursive version of BuildSubAggregate. It takes a few different
// arguments. Idxs is the index within the nested struct From that we are
// looking at now (which is of type IndexedType). IdxSkip is the number of
// indices from Idxs that should be left out when inserting into the resulting
// struct. To is the result struct built so far, new insertvalue instructions
// build on that.
static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
                                SmallVector<unsigned, 10> &Idxs,
                                unsigned IdxSkip,
                                Instruction *InsertBefore) {
  llvm::StructType *STy = llvm::dyn_cast<llvm::StructType>(IndexedType);
  if (STy) {
    // Save the original To argument so we can modify it
    Value *OrigTo = To;
    // General case, the type indexed by Idxs is a struct
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      // Process each struct element recursively
      Idxs.push_back(i);
      Value *PrevTo = To;
      To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
                             InsertBefore);
      Idxs.pop_back();
      if (!To) {
        // Couldn't find any inserted value for this index? Cleanup
        while (PrevTo != OrigTo) {
          InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
          PrevTo = Del->getAggregateOperand();
          Del->eraseFromParent();
        }
        // Stop processing elements
        break;
      }
    }
    // If we successfully found a value for each of our subaggregates
    if (To)
      return To;
  }
  // Base case, the type indexed by SourceIdxs is not a struct, or not all of
  // the struct's elements had a value that was inserted directly. In the latter
  // case, perhaps we can't determine each of the subelements individually, but
  // we might be able to find the complete struct somewhere.

  // Find the value that is at that particular spot
  Value *V = FindInsertedValue(From, Idxs);

  if (!V)
    return NULL;

  // Insert the value in the new (sub) aggregate
  return llvm::InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
                                       "tmp", InsertBefore);
}

// This helper takes a nested struct and extracts a part of it (which is again
// a struct) into a new value. For example, given the struct:
//   { a, { b, { c, d }, e } }
// and the indices "1, 1" this returns
//   { c, d }.
//
// It does this by inserting an insertvalue for each element in the resulting
// struct, as opposed to just inserting a single struct. This will only work if
// each of the elements of the substruct are known (i.e., inserted into From by
// an insertvalue instruction somewhere).
//
// All inserted insertvalue instructions are inserted before InsertBefore
static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
                                Instruction *InsertBefore) {
  assert(InsertBefore && "Must have someplace to insert!");
  Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
                                                       idx_range);
  Value *To = UndefValue::get(IndexedType);
  SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
  unsigned IdxSkip = Idxs.size();

  return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
}

/// FindInsertedValue - Given an aggregate and a sequence of indices, see if
/// the scalar value indexed is already around as a register, for example if it
/// were inserted directly into the aggregate.
///
/// If InsertBefore is not null, this function will duplicate (modified)
/// insertvalues when a part of a nested struct is extracted.
Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
                               Instruction *InsertBefore) {
  // Nothing to index? Just return V then (this is useful at the end of our
  // recursion).
  if (idx_range.empty())
    return V;
  // We have indices, so V should have an indexable type.
  assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
         "Not looking at a struct or array?");
  assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
         "Invalid indices for type?");

  if (Constant *C = dyn_cast<Constant>(V)) {
    C = C->getAggregateElement(idx_range[0]);
    if (C == 0) return 0;
    return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
  }

  if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
    // Loop the indices for the insertvalue instruction in parallel with the
    // requested indices
    const unsigned *req_idx = idx_range.begin();
    for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
         i != e; ++i, ++req_idx) {
      if (req_idx == idx_range.end()) {
        // We can't handle this without inserting insertvalues
        if (!InsertBefore)
          return 0;

        // The requested index identifies a part of a nested aggregate. Handle
        // this specially.
        //   %A = insertvalue { i32, { i32, i32 } } undef, i32 10, 1, 0
        //   %B = insertvalue { i32, { i32, i32 } } %A, i32 11, 1, 1
        //   %C = extractvalue { i32, { i32, i32 } } %B, 1
        // can be changed into
        //   %A = insertvalue { i32, i32 } undef, i32 10, 0
        //   %C = insertvalue { i32, i32 } %A, i32 11, 1
        // which allows the unused 0,0 element from the nested struct to be
        // removed.
        return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
                                 InsertBefore);
      }

      // This insertvalue inserts something other than what we are looking
      // for. See if the (aggregate) value it was inserted into has the value
      // we are looking for, then.
      if (*req_idx != *i)
        return FindInsertedValue(I->getAggregateOperand(), idx_range,
                                 InsertBefore);
    }
    // If we end up here, the indices of the insertvalue match with those
    // requested (though possibly only partially). Now we recursively look at
    // the inserted value, passing any remaining indices.
    return FindInsertedValue(I->getInsertedValueOperand(),
                             makeArrayRef(req_idx, idx_range.end()),
                             InsertBefore);
  }

  if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
    // If we're extracting a value from an aggregate that was itself extracted
    // from something else, we can extract from that something else directly.
    // However, we will need to chain I's indices with the requested indices.

    // Calculate the number of indices required.
    unsigned size = I->getNumIndices() + idx_range.size();
    // Allocate some space to put the new indices in.
    SmallVector<unsigned, 5> Idxs;
    Idxs.reserve(size);
    // Add the indices from the extractvalue instruction.
    Idxs.append(I->idx_begin(), I->idx_end());

    // Add the requested indices.
    Idxs.append(idx_range.begin(), idx_range.end());

    assert(Idxs.size() == size
           && "Number of indices added not correct?");

    return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
  }
  // Otherwise, we don't know (e.g. we're extracting from a function return
  // value or a load instruction).
  return 0;
}

/// GetPointerBaseWithConstantOffset - Analyze the specified pointer to see if
/// it can be expressed as a base pointer plus a constant offset. Return the
/// base and offset to the caller.
Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                              const TargetData &TD) {
  Operator *PtrOp = dyn_cast<Operator>(Ptr);
  if (PtrOp == 0 || Ptr->getType()->isVectorTy())
    return Ptr;

  // Just look through bitcasts.
  if (PtrOp->getOpcode() == Instruction::BitCast)
    return GetPointerBaseWithConstantOffset(PtrOp->getOperand(0), Offset, TD);

  // If this is a GEP with constant indices, we can look through it.
  GEPOperator *GEP = dyn_cast<GEPOperator>(PtrOp);
  if (GEP == 0 || !GEP->hasAllConstantIndices()) return Ptr;

  gep_type_iterator GTI = gep_type_begin(GEP);
  for (User::op_iterator I = GEP->idx_begin(), E = GEP->idx_end(); I != E;
       ++I, ++GTI) {
    ConstantInt *OpC = cast<ConstantInt>(*I);
    if (OpC->isZero()) continue;

    // Handle struct and array indices, which add their offset to the pointer.
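    // For example (illustrative, assuming a 64-bit target with natural
    // alignment): indexing field 1 of { i32, i64 } adds that field's layout
    // offset (8 bytes), while index 3 into [8 x i16] falls into the
    // sequential case below and adds 3 * sizeof(i16) = 6 bytes.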
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
    } else {
      uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
      Offset += OpC->getSExtValue()*Size;
    }
  }

  // Re-sign extend from the pointer size if needed to get overflow edge cases
  // right.
  unsigned PtrSize = TD.getPointerSizeInBits();
  if (PtrSize < 64)
    Offset = SignExtend64(Offset, PtrSize);

  return GetPointerBaseWithConstantOffset(GEP->getPointerOperand(), Offset, TD);
}


/// getConstantStringInfo - This function extracts the constant string pointed
/// to by V (optionally starting at byte Offset). If successful, it returns
/// true and returns the string in Str; if unsuccessful, it returns false.
bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
                                 uint64_t Offset, bool TrimAtNul) {
  assert(V);

  // Look through bitcast instructions and geps.
  V = V->stripPointerCasts();

  // If the value is a GEP instruction or constant expression, treat it as an
  // offset.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    // Make sure the GEP has exactly three arguments.
    if (GEP->getNumOperands() != 3)
      return false;

    // Make sure the pointer being indexed points to an array of i8.
    PointerType *PT = cast<PointerType>(GEP->getOperand(0)->getType());
    ArrayType *AT = dyn_cast<ArrayType>(PT->getElementType());
    if (AT == 0 || !AT->getElementType()->isIntegerTy(8))
      return false;

    // Check to make sure that the first operand of the GEP is an integer and
    // has value 0 so that we are sure we're indexing into the initializer.
    const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
    if (FirstIdx == 0 || !FirstIdx->isZero())
      return false;

    // If the second index isn't a ConstantInt, then this is a variable index
    // into the array. If this occurs, we can't say anything meaningful about
    // the string.
    uint64_t StartIdx = 0;
    if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
      StartIdx = CI->getZExtValue();
    else
      return false;
    return getConstantStringInfo(GEP->getOperand(0), Str, StartIdx+Offset);
  }

  // The pointer, whether it came from a GEP or not, must reference a global
  // variable that is a constant and has a definitive initializer. The
  // referenced constant initializer is the array that we'll use for
  // optimization.
  const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
    return false;

  // Handle the all-zeros case.
  if (GV->getInitializer()->isNullValue()) {
    // This is a degenerate case. The initializer is constant zero, so the
    // length of the string must be zero.
    Str = "";
    return true;
  }

  // Must be a ConstantDataArray containing a string.
  const ConstantDataArray *Array =
    dyn_cast<ConstantDataArray>(GV->getInitializer());
  if (Array == 0 || !Array->isString())
    return false;

  // Get the number of elements in the array.
  uint64_t NumElts = Array->getType()->getArrayNumElements();

  // Start out with the entire array in the StringRef.
  Str = Array->getAsString();

  if (Offset > NumElts)
    return false;

  // Skip over 'offset' bytes.
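  // For example (illustrative): with an initializer of c"ab\00cd" and
  // Offset == 1, the substr below yields "b\00cd", and TrimAtNul then
  // reduces it to "b".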
  Str = Str.substr(Offset);

  if (TrimAtNul) {
    // Trim off the \0 and anything after it. If the array is not nul
    // terminated, we just return the whole end of string. The client may know
    // some other way that the string is length-bound.
    Str = Str.substr(0, Str.find('\0'));
  }
  return true;
}

// These next two functions are very similar to the one above, but also look
// through PHI nodes.
// TODO: See if we can integrate these with getConstantStringInfo.

/// GetStringLengthH - If we can compute the length of the string pointed to by
/// the specified pointer, return 'len+1'. If we can't, return 0.
static uint64_t GetStringLengthH(Value *V, SmallPtrSet<PHINode*, 32> &PHIs) {
  // Look through noop bitcast instructions.
  V = V->stripPointerCasts();

  // If this is a PHI node, there are two cases: either we have already seen it
  // or we haven't.
  if (PHINode *PN = dyn_cast<PHINode>(V)) {
    if (!PHIs.insert(PN))
      return ~0ULL;  // already in the set.

    // If it was new, see if all the input strings are the same length.
    uint64_t LenSoFar = ~0ULL;
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      uint64_t Len = GetStringLengthH(PN->getIncomingValue(i), PHIs);
      if (Len == 0) return 0; // Unknown length -> unknown.

      if (Len == ~0ULL) continue;

      if (Len != LenSoFar && LenSoFar != ~0ULL)
        return 0;    // Disagree -> unknown.
      LenSoFar = Len;
    }

    // Success, all agree.
    return LenSoFar;
  }

  // strlen(select(c,x,y)) is known only when strlen(x) and strlen(y) agree (a
  // "don't know yet" ~0ULL answer from one arm defers to the other arm).
  if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
    uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs);
    if (Len1 == 0) return 0;
    uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs);
    if (Len2 == 0) return 0;
    if (Len1 == ~0ULL) return Len2;
    if (Len2 == ~0ULL) return Len1;
    if (Len1 != Len2) return 0;
    return Len1;
  }

  // Otherwise, see if we can read the string.
  StringRef StrData;
  if (!getConstantStringInfo(V, StrData))
    return 0;

  return StrData.size()+1;
}

/// GetStringLength - If we can compute the length of the string pointed to by
/// the specified pointer, return 'len+1'. If we can't, return 0.
uint64_t llvm::GetStringLength(Value *V) {
  if (!V->getType()->isPointerTy()) return 0;

  SmallPtrSet<PHINode*, 32> PHIs;
  uint64_t Len = GetStringLengthH(V, PHIs);
  // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so
  // return 1, the length of an empty string (just the nul terminator).
  return Len == ~0ULL ? 1 : Len;
}

Value *
llvm::GetUnderlyingObject(Value *V, const TargetData *TD, unsigned MaxLookup) {
  if (!V->getType()->isPointerTy())
    return V;
  for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
      if (GA->mayBeOverridden())
        return V;
      V = GA->getAliasee();
    } else {
      // See if InstructionSimplify knows any relevant tricks.
      if (Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Acquire a DominatorTree and use it.
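        // If simplification finds a simpler form, restart the stripping loop
        // on it; the simplified value may expose further GEPs or bitcasts.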
        if (Value *Simplified = SimplifyInstruction(I, TD, 0)) {
          V = Simplified;
          continue;
        }

      return V;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  }
  return V;
}

void
llvm::GetUnderlyingObjects(Value *V,
                           SmallVectorImpl<Value *> &Objects,
                           const TargetData *TD,
                           unsigned MaxLookup) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist;
  Worklist.push_back(V);
  do {
    Value *P = Worklist.pop_back_val();
    P = GetUnderlyingObject(P, TD, MaxLookup);

    if (!Visited.insert(P))
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        Worklist.push_back(PN->getIncomingValue(i));
      continue;
    }

    Objects.push_back(P);
  } while (!Worklist.empty());
}

/// onlyUsedByLifetimeMarkers - Return true if the only users of this pointer
/// are lifetime markers.
///
bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
  for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
       UI != UE; ++UI) {
    const IntrinsicInst *II = dyn_cast<IntrinsicInst>(*UI);
    if (!II) return false;

    if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
        II->getIntrinsicID() != Intrinsic::lifetime_end)
      return false;
  }
  return true;
}

bool llvm::isSafeToSpeculativelyExecute(const Value *V,
                                        const TargetData *TD) {
  const Operator *Inst = dyn_cast<Operator>(V);
  if (!Inst)
    return false;

  for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
    if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
      if (C->canTrap())
        return false;

  switch (Inst->getOpcode()) {
  default:
    return true;
  case Instruction::UDiv:
  case Instruction::URem:
    // x / y is undefined if y == 0, but calculations like x / 3 are safe.
    return isKnownNonZero(Inst->getOperand(1), TD);
  case Instruction::SDiv:
  case Instruction::SRem: {
    Value *Op = Inst->getOperand(1);
    // x / y is undefined if y == 0.
    if (!isKnownNonZero(Op, TD))
      return false;
    // x / y might also be undefined if y == -1 (INT_MIN / -1 overflows), so
    // require some bit of the divisor to be known zero, ruling out -1.
    unsigned BitWidth = getBitWidth(Op->getType(), TD);
    if (BitWidth == 0)
      return false;
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(Op, KnownZero, KnownOne, TD);
    return !!KnownZero;
  }
  case Instruction::Load: {
    const LoadInst *LI = cast<LoadInst>(Inst);
    if (!LI->isUnordered())
      return false;
    return LI->getPointerOperand()->isDereferenceablePointer();
  }
  case Instruction::Call: {
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
      switch (II->getIntrinsicID()) {
      // These synthetic intrinsics have no side-effects and just mark
      // information about their operands.
      // FIXME: There are other no-op synthetic instructions that potentially
      // should be considered at least *safe* to speculate...
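      // (For example, the *_with_overflow intrinsics below never trap: they
      // return the wrapped result together with an overflow flag, so
      // speculating them is safe.)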
      case Intrinsic::dbg_declare:
      case Intrinsic::dbg_value:
        return true;

      case Intrinsic::bswap:
      case Intrinsic::ctlz:
      case Intrinsic::ctpop:
      case Intrinsic::cttz:
      case Intrinsic::objectsize:
      case Intrinsic::sadd_with_overflow:
      case Intrinsic::smul_with_overflow:
      case Intrinsic::ssub_with_overflow:
      case Intrinsic::uadd_with_overflow:
      case Intrinsic::umul_with_overflow:
      case Intrinsic::usub_with_overflow:
        return true;
      // TODO: some fp intrinsics are marked as having the same error handling
      // as libm. They're safe to speculate when they won't error.
      // TODO: are convert_{from,to}_fp16 safe?
      // TODO: can we list target-specific intrinsics here?
      default: break;
      }
    }
    return false; // The called function could have undefined behavior or
                  // side-effects, even if marked readnone nounwind.
  }
  case Instruction::VAArg:
  case Instruction::Alloca:
  case Instruction::Invoke:
  case Instruction::PHI:
  case Instruction::Store:
  case Instruction::Ret:
  case Instruction::Br:
  case Instruction::IndirectBr:
  case Instruction::Switch:
  case Instruction::Unreachable:
  case Instruction::Fence:
  case Instruction::LandingPad:
  case Instruction::AtomicRMW:
  case Instruction::AtomicCmpXchg:
  case Instruction::Resume:
    return false; // Misc instructions which have effects.
  }
}
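// Example usage (an illustrative sketch, not part of this file): a hoisting
// transform might guard itself with something like
//   if (isSafeToSpeculativelyExecute(I, TD))
//     I->moveBefore(InsertPt);
// where I is the candidate Instruction and InsertPt is the earlier insertion
// point chosen by the pass.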