ValueTracking.cpp revision 0dabb0b177089202dae485d085ed15bd41ef29e6
//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/GlobalVariable.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include <cstring>
using namespace llvm;

/// getOpcode - If this is an Instruction or a ConstantExpr, return the
/// opcode value. Otherwise return UserOp1.
static unsigned getOpcode(const Value *V) {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    return I->getOpcode();
  if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
    return CE->getOpcode();
  // Use UserOp1 to mean there's no opcode.
  return Instruction::UserOp1;
}


/// ComputeMaskedBits - Determine which of the bits specified in Mask are
/// known to be either zero or one and return them in the KnownZero/KnownOne
/// bit sets. This code only analyzes bits in Mask, in order to short-circuit
/// processing.
/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
/// we cannot optimize based on the assumption that it is zero without changing
/// it to be an explicit zero. If we don't change it to zero, other code could
/// be optimized based on the contradictory assumption that it is non-zero.
/// Because instcombine aggressively folds operations with undef args anyway,
/// this won't lose us code quality.
void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
                             APInt &KnownZero, APInt &KnownOne,
                             TargetData *TD, unsigned Depth) {
  const unsigned MaxDepth = 6;
  assert(V && "No Value?");
  assert(Depth <= MaxDepth && "Limit Search Depth");
  unsigned BitWidth = Mask.getBitWidth();
  assert((V->getType()->isIntOrIntVector() || isa<PointerType>(V->getType())) &&
         "Not integer or pointer type!");
  assert((!TD ||
          TD->getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
         (!V->getType()->isIntOrIntVector() ||
          V->getType()->getScalarSizeInBits() == BitWidth) &&
         KnownZero.getBitWidth() == BitWidth &&
         KnownOne.getBitWidth() == BitWidth &&
         "V, Mask, KnownOne and KnownZero should have same BitWidth");

  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    // We know all of the bits for a constant!
    KnownOne = CI->getValue() & Mask;
    KnownZero = ~KnownOne & Mask;
    return;
  }
  // Null and aggregate-zero are all-zeros.
  if (isa<ConstantPointerNull>(V) ||
      isa<ConstantAggregateZero>(V)) {
    KnownOne.clear();
    KnownZero = Mask;
    return;
  }
  // Handle a constant vector by taking the intersection of the known bits of
  // each element.
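  // Illustrative example (hypothetical values): for <2 x i32> <i32 4, i32 12>
  // the elements are 0b100 and 0b1100; intersecting their known bits leaves
  // bit 2 known one and bits 0-1 known zero, while bit 3 (one in 12, zero in
  // 4) stays unknown.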
  if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
    KnownZero.set(); KnownOne.set();
    for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
      APInt KnownZero2(BitWidth, 0), KnownOne2(BitWidth, 0);
      ComputeMaskedBits(CV->getOperand(i), Mask, KnownZero2, KnownOne2,
                        TD, Depth);
      KnownZero &= KnownZero2;
      KnownOne &= KnownOne2;
    }
    return;
  }
  // The address of an aligned GlobalValue has trailing zeros.
  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    unsigned Align = GV->getAlignment();
    if (Align == 0 && TD && GV->getType()->getElementType()->isSized())
      Align = TD->getPrefTypeAlignment(GV->getType()->getElementType());
    if (Align > 0)
      KnownZero = Mask & APInt::getLowBitsSet(BitWidth,
                                              CountTrailingZeros_32(Align));
    else
      KnownZero.clear();
    KnownOne.clear();
    return;
  }

  KnownZero.clear(); KnownOne.clear();   // Start out not knowing anything.

  if (Depth == MaxDepth || Mask == 0)
    return;  // Limit search depth.

  User *I = dyn_cast<User>(V);
  if (!I) return;

  APInt KnownZero2(KnownZero), KnownOne2(KnownOne);
  switch (getOpcode(I)) {
  default: break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    ComputeMaskedBits(I->getOperand(1), Mask, KnownZero, KnownOne, TD, Depth+1);
    APInt Mask2(Mask & ~KnownZero);
    ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, TD,
                      Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    KnownZero |= KnownZero2;
    return;
  }
  case Instruction::Or: {
    ComputeMaskedBits(I->getOperand(1), Mask, KnownZero, KnownOne, TD, Depth+1);
    APInt Mask2(Mask & ~KnownOne);
    ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, TD,
                      Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    KnownOne |= KnownOne2;
    return;
  }
  case Instruction::Xor: {
    ComputeMaskedBits(I->getOperand(1), Mask, KnownZero, KnownOne, TD, Depth+1);
    ComputeMaskedBits(I->getOperand(0), Mask, KnownZero2, KnownOne2, TD,
                      Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
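    // (A bit known one in one operand and known zero in the other XORs to a
    // known one.)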
    KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
    KnownZero = KnownZeroOut;
    return;
  }
  case Instruction::Mul: {
    APInt Mask2 = APInt::getAllOnesValue(BitWidth);
    ComputeMaskedBits(I->getOperand(1), Mask2, KnownZero, KnownOne, TD,
                      Depth+1);
    ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, TD,
                      Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If low bits are zero in either operand, output low known-0 bits.
    // Also compute a conservative estimate for high known-0 bits.
    // More trickiness is possible, but this is sufficient for the
    // interesting case of alignment computation.
    KnownOne.clear();
    unsigned TrailZ = KnownZero.countTrailingOnes() +
                      KnownZero2.countTrailingOnes();
    unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
                              KnownZero2.countLeadingOnes(),
                              BitWidth) - BitWidth;

    TrailZ = std::min(TrailZ, BitWidth);
    LeadZ = std::min(LeadZ, BitWidth);
    KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
                APInt::getHighBitsSet(BitWidth, LeadZ);
    KnownZero &= Mask;
    return;
  }
  case Instruction::UDiv: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
    APInt AllOnes = APInt::getAllOnesValue(BitWidth);
    ComputeMaskedBits(I->getOperand(0),
                      AllOnes, KnownZero2, KnownOne2, TD, Depth+1);
    unsigned LeadZ = KnownZero2.countLeadingOnes();

    KnownOne2.clear();
    KnownZero2.clear();
    ComputeMaskedBits(I->getOperand(1),
                      AllOnes, KnownZero2, KnownOne2, TD, Depth+1);
    unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
    if (RHSUnknownLeadingOnes != BitWidth)
      LeadZ = std::min(BitWidth,
                       LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);

    KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ) & Mask;
    return;
  }
  case Instruction::Select:
    ComputeMaskedBits(I->getOperand(2), Mask, KnownZero, KnownOne, TD, Depth+1);
    ComputeMaskedBits(I->getOperand(1), Mask, KnownZero2, KnownOne2, TD,
                      Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    return;
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    return; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // We can't handle these if we don't know the pointer size.
    if (!TD) return;
    // FALL THROUGH and handle them the same as zext/trunc.
  case Instruction::ZExt:
  case Instruction::Trunc: {
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    const Type *SrcTy = I->getOperand(0)->getType();
    unsigned SrcBitWidth = TD ?
      TD->getTypeSizeInBits(SrcTy) :
      SrcTy->getScalarSizeInBits();
    APInt MaskIn(Mask);
    MaskIn.zextOrTrunc(SrcBitWidth);
    KnownZero.zextOrTrunc(SrcBitWidth);
    KnownOne.zextOrTrunc(SrcBitWidth);
    ComputeMaskedBits(I->getOperand(0), MaskIn, KnownZero, KnownOne, TD,
                      Depth+1);
    KnownZero.zextOrTrunc(BitWidth);
    KnownOne.zextOrTrunc(BitWidth);
    // Any top bits are known to be zero.
    if (BitWidth > SrcBitWidth)
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    return;
  }
  case Instruction::BitCast: {
    const Type *SrcTy = I->getOperand(0)->getType();
    if ((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !isa<VectorType>(I->getType())) {
      ComputeMaskedBits(I->getOperand(0), Mask, KnownZero, KnownOne, TD,
                        Depth+1);
      return;
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    const IntegerType *SrcTy = cast<IntegerType>(I->getOperand(0)->getType());
    unsigned SrcBitWidth = SrcTy->getBitWidth();

    APInt MaskIn(Mask);
    MaskIn.trunc(SrcBitWidth);
    KnownZero.trunc(SrcBitWidth);
    KnownOne.trunc(SrcBitWidth);
    ComputeMaskedBits(I->getOperand(0), MaskIn, KnownZero, KnownOne, TD,
                      Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    KnownZero.zext(BitWidth);
    KnownOne.zext(BitWidth);

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    if (KnownZero[SrcBitWidth-1])             // Input sign bit known zero
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    else if (KnownOne[SrcBitWidth-1])         // Input sign bit known set
      KnownOne |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    return;
  }
  case Instruction::Shl:
    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
      APInt Mask2(Mask.lshr(ShiftAmt));
      ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero, KnownOne, TD,
                        Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero <<= ShiftAmt;
      KnownOne  <<= ShiftAmt;
      KnownZero |= APInt::getLowBitsSet(BitWidth, ShiftAmt); // low bits known 0
      return;
    }
    break;
  case Instruction::LShr:
    // (ushr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      // Compute the new bits that are at the top now.
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);

      // Unsigned shift right.
      APInt Mask2(Mask.shl(ShiftAmt));
      ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero, KnownOne, TD,
                        Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
      KnownOne  = APIntOps::lshr(KnownOne, ShiftAmt);
      // high bits known zero.
      KnownZero |= APInt::getHighBitsSet(BitWidth, ShiftAmt);
      return;
    }
    break;
  case Instruction::AShr:
    // (ashr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      // Compute the new bits that are at the top now.
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);

      // Signed shift right.
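      // For example (hypothetical operand), for "ashr i8 %x, 3" the three new
      // top bits all copy the original sign bit of %x, so if that bit is known
      // below, the new top bits become known too.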
      APInt Mask2(Mask.shl(ShiftAmt));
      ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero, KnownOne, TD,
                        Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
      KnownOne  = APIntOps::lshr(KnownOne, ShiftAmt);

      APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
      if (KnownZero[BitWidth-ShiftAmt-1])       // New bits are known zero.
        KnownZero |= HighBits;
      else if (KnownOne[BitWidth-ShiftAmt-1])   // New bits are known one.
        KnownOne |= HighBits;
      return;
    }
    break;
  case Instruction::Sub: {
    if (ConstantInt *CLHS = dyn_cast<ConstantInt>(I->getOperand(0))) {
      // We know that the top bits of C-X are clear if X contains fewer bits
      // than C (i.e. no wrap-around can happen). For example, 20-X is
      // positive if we can prove that X is >= 0 and < 16.
      if (!CLHS->getValue().isNegative()) {
        unsigned NLZ = (CLHS->getValue()+1).countLeadingZeros();
        // NLZ can't be BitWidth with no sign bit
        APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
        ComputeMaskedBits(I->getOperand(1), MaskV, KnownZero2, KnownOne2,
                          TD, Depth+1);

        // If all of the MaskV bits are known to be zero, then we know the
        // output top bits are zero, because we now know that the output is
        // from [0-C].
        if ((KnownZero2 & MaskV) == MaskV) {
          unsigned NLZ2 = CLHS->getValue().countLeadingZeros();
          // Top bits known zero.
          KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2) & Mask;
        }
      }
    }
  }
  // fall through
  case Instruction::Add: {
    // If one of the operands has trailing zeros, then the bits that the
    // other operand has in those bit positions will be preserved in the
    // result. For an add, this works with either operand. For a subtract,
    // this only works if the known zeros are in the right operand.
    APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
    APInt Mask2 = APInt::getLowBitsSet(BitWidth,
                                       BitWidth - Mask.countLeadingZeros());
    ComputeMaskedBits(I->getOperand(0), Mask2, LHSKnownZero, LHSKnownOne, TD,
                      Depth+1);
    assert((LHSKnownZero & LHSKnownOne) == 0 &&
           "Bits known to be one AND zero?");
    unsigned LHSKnownZeroOut = LHSKnownZero.countTrailingOnes();

    ComputeMaskedBits(I->getOperand(1), Mask2, KnownZero2, KnownOne2, TD,
                      Depth+1);
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
    unsigned RHSKnownZeroOut = KnownZero2.countTrailingOnes();

    // Determine which operand has more trailing zeros, and use that
    // many bits from the other operand.
    if (LHSKnownZeroOut > RHSKnownZeroOut) {
      if (getOpcode(I) == Instruction::Add) {
        APInt Mask = APInt::getLowBitsSet(BitWidth, LHSKnownZeroOut);
        KnownZero |= KnownZero2 & Mask;
        KnownOne  |= KnownOne2 & Mask;
      } else {
        // If the known zeros are in the left operand for a subtract,
        // fall back to the minimum known zeros in both operands.
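        // (When both operands have their low k bits known zero, no borrow can
        // arise in those positions, so the difference keeps those low zeros.)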
        KnownZero |= APInt::getLowBitsSet(BitWidth,
                                          std::min(LHSKnownZeroOut,
                                                   RHSKnownZeroOut));
      }
    } else if (RHSKnownZeroOut >= LHSKnownZeroOut) {
      APInt Mask = APInt::getLowBitsSet(BitWidth, RHSKnownZeroOut);
      KnownZero |= LHSKnownZero & Mask;
      KnownOne  |= LHSKnownOne & Mask;
    }
    return;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue();
      if (RA.isPowerOf2() || (-RA).isPowerOf2()) {
        APInt LowBits = RA.isStrictlyPositive() ? (RA - 1) : ~RA;
        APInt Mask2 = LowBits | APInt::getSignBit(BitWidth);
        ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, TD,
                          Depth+1);

        // If the sign bit of the first operand is zero, the sign bit of
        // the result is zero. If the first operand has no one bits below
        // the second operand's single 1 bit, its sign will be zero.
        if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
          KnownZero2 |= ~LowBits;

        KnownZero |= KnownZero2 & Mask;

        assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      }
    }
    break;
  case Instruction::URem: {
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        APInt Mask2 = LowBits & Mask;
        KnownZero |= ~LowBits & Mask;
        ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero, KnownOne, TD,
                          Depth+1);
        assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
    APInt AllOnes = APInt::getAllOnesValue(BitWidth);
    ComputeMaskedBits(I->getOperand(0), AllOnes, KnownZero, KnownOne,
                      TD, Depth+1);
    ComputeMaskedBits(I->getOperand(1), AllOnes, KnownZero2, KnownOne2,
                      TD, Depth+1);

    unsigned Leaders = std::max(KnownZero.countLeadingOnes(),
                                KnownZero2.countLeadingOnes());
    KnownOne.clear();
    KnownZero = APInt::getHighBitsSet(BitWidth, Leaders) & Mask;
    break;
  }

  case Instruction::Alloca:
  case Instruction::Malloc: {
    AllocationInst *AI = cast<AllocationInst>(V);
    unsigned Align = AI->getAlignment();
    if (Align == 0 && TD) {
      if (isa<AllocaInst>(AI))
        Align = TD->getABITypeAlignment(AI->getType()->getElementType());
      else if (isa<MallocInst>(AI)) {
        // Malloc returns maximally aligned memory.
        Align = TD->getABITypeAlignment(AI->getType()->getElementType());
        Align =
          std::max(Align,
                   (unsigned)TD->getABITypeAlignment(Type::DoubleTy));
        Align =
          std::max(Align,
                   (unsigned)TD->getABITypeAlignment(Type::Int64Ty));
      }
    }

    if (Align > 0)
      KnownZero = Mask & APInt::getLowBitsSet(BitWidth,
                                              CountTrailingZeros_32(Align));
    break;
  }
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
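    // For instance (hypothetical operands), a gep of an 8-byte-aligned i32*
    // base by a variable index scales the index by sizeof(i32) == 4, so the
    // result is known to have at least min(3, 2) == 2 trailing zero bits.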
    APInt LocalMask = APInt::getAllOnesValue(BitWidth);
    APInt LocalKnownZero(BitWidth, 0), LocalKnownOne(BitWidth, 0);
    ComputeMaskedBits(I->getOperand(0), LocalMask,
                      LocalKnownZero, LocalKnownOne, TD, Depth+1);
    unsigned TrailZ = LocalKnownZero.countTrailingOnes();

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      Value *Index = I->getOperand(i);
      if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
        // Handle struct member offset arithmetic.
        if (!TD) return;
        const StructLayout *SL = TD->getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        uint64_t Offset = SL->getElementOffset(Idx);
        TrailZ = std::min(TrailZ,
                          CountTrailingZeros_64(Offset));
      } else {
        // Handle array index arithmetic.
        const Type *IndexedTy = GTI.getIndexedType();
        if (!IndexedTy->isSized()) return;
        unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
        uint64_t TypeSize = TD ? TD->getTypeAllocSize(IndexedTy) : 1;
        LocalMask = APInt::getAllOnesValue(GEPOpiBits);
        LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
        ComputeMaskedBits(Index, LocalMask,
                          LocalKnownZero, LocalKnownOne, TD, Depth+1);
        TrailZ = std::min(TrailZ,
                          unsigned(CountTrailingZeros_64(TypeSize) +
                                   LocalKnownZero.countTrailingOnes()));
      }
    }

    KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) & Mask;
    break;
  }
  case Instruction::PHI: {
    PHINode *P = cast<PHINode>(I);
    // Handle the case of a simple two-predecessor recurrence PHI.
    // There's a lot more that could theoretically be done here, but
    // this is sufficient to catch some interesting cases.
    if (P->getNumIncomingValues() == 2) {
      for (unsigned i = 0; i != 2; ++i) {
        Value *L = P->getIncomingValue(i);
        Value *R = P->getIncomingValue(!i);
        User *LU = dyn_cast<User>(L);
        if (!LU)
          continue;
        unsigned Opcode = getOpcode(LU);
        // Check for operations that have the property that if
        // both their operands have low zero bits, the result
        // will have low zero bits.
        if (Opcode == Instruction::Add ||
            Opcode == Instruction::Sub ||
            Opcode == Instruction::And ||
            Opcode == Instruction::Or ||
            Opcode == Instruction::Mul) {
          Value *LL = LU->getOperand(0);
          Value *LR = LU->getOperand(1);
          // Find a recurrence.
          if (LL == I)
            L = LR;
          else if (LR == I)
            L = LL;
          else
            break;
          // Ok, we have a PHI of the form L op= R. Check for low
          // zero bits.
          APInt Mask2 = APInt::getAllOnesValue(BitWidth);
          ComputeMaskedBits(R, Mask2, KnownZero2, KnownOne2, TD, Depth+1);
          Mask2 = APInt::getLowBitsSet(BitWidth,
                                       KnownZero2.countTrailingOnes());

          // We need to take the minimum number of known bits
          APInt KnownZero3(KnownZero), KnownOne3(KnownOne);
          ComputeMaskedBits(L, Mask2, KnownZero3, KnownOne3, TD, Depth+1);

          KnownZero = Mask &
                      APInt::getLowBitsSet(BitWidth,
                                           std::min(KnownZero2.countTrailingOnes(),
                                                    KnownZero3.countTrailingOnes()));
          break;
        }
      }
    }

    // Otherwise take the unions of the known bit sets of the operands,
    // taking conservative care to avoid excessive recursion.
    if (Depth < MaxDepth - 1 && !KnownZero && !KnownOne) {
      KnownZero = APInt::getAllOnesValue(BitWidth);
      KnownOne = APInt::getAllOnesValue(BitWidth);
      for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i) {
        // Skip direct self references.
        if (P->getIncomingValue(i) == P) continue;

        KnownZero2 = APInt(BitWidth, 0);
        KnownOne2 = APInt(BitWidth, 0);
        // Recurse, but cap the recursion to one level, because we don't
        // want to waste time spinning around in loops.
        ComputeMaskedBits(P->getIncomingValue(i), KnownZero | KnownOne,
                          KnownZero2, KnownOne2, TD, MaxDepth-1);
        KnownZero &= KnownZero2;
        KnownOne &= KnownOne2;
        // If all bits have been ruled out, there's no need to check
        // more operands.
        if (!KnownZero && !KnownOne)
          break;
      }
    }
    break;
  }
  case Instruction::Call:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::ctpop:
      case Intrinsic::ctlz:
      case Intrinsic::cttz: {
        unsigned LowBits = Log2_32(BitWidth)+1;
        KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
        break;
      }
      }
    }
    break;
  }
}

/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
/// this predicate to simplify operations downstream. Mask is known to be zero
/// for bits that V cannot have.
bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask,
                             TargetData *TD, unsigned Depth) {
  APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0);
  ComputeMaskedBits(V, Mask, KnownZero, KnownOne, TD, Depth);
  assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
  return (KnownZero & Mask) == Mask;
}



/// ComputeNumSignBits - Return the number of times the sign bit of the
/// register is replicated into the other bits. We know that at least 1 bit
/// is always equal to the sign bit (itself), but other cases can give us
/// information. For example, immediately after an "ashr X, 2", we know that
/// the top 3 bits are all equal to each other, so we return 3.
///
/// 'Op' must have a scalar integer type.
///
unsigned llvm::ComputeNumSignBits(Value *V, TargetData *TD, unsigned Depth) {
  assert((TD || V->getType()->isIntOrIntVector()) &&
         "ComputeNumSignBits requires a TargetData object to operate "
         "on non-integer values!");
  const Type *Ty = V->getType();
  unsigned TyBits = TD ? TD->getTypeSizeInBits(V->getType()->getScalarType()) :
                         Ty->getScalarSizeInBits();
  unsigned Tmp, Tmp2;
  unsigned FirstAnswer = 1;

  // Note that ConstantInt is handled by the general ComputeMaskedBits case
  // below.

  if (Depth == 6)
    return 1;  // Limit search depth.

  User *U = dyn_cast<User>(V);
  switch (getOpcode(V)) {
  default: break;
  case Instruction::SExt:
    Tmp = TyBits-cast<IntegerType>(U->getOperand(0)->getType())->getBitWidth();
    return ComputeNumSignBits(U->getOperand(0), TD, Depth+1) + Tmp;

  case Instruction::AShr:
    Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
    // ashr X, C   -> adds C sign bits.
    if (ConstantInt *C = dyn_cast<ConstantInt>(U->getOperand(1))) {
      Tmp += C->getZExtValue();
      if (Tmp > TyBits) Tmp = TyBits;
    }
    return Tmp;
  case Instruction::Shl:
    if (ConstantInt *C = dyn_cast<ConstantInt>(U->getOperand(1))) {
      // shl destroys sign bits.
      Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
      if (C->getZExtValue() >= TyBits ||      // Bad shift.
          C->getZExtValue() >= Tmp) break;    // Shifted all sign bits out.
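      // Otherwise the shift removes C of the Tmp duplicated sign bits,
      // leaving Tmp - C copies.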
      return Tmp - C->getZExtValue();
    }
    break;
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:    // NOT is handled here.
    // Logical binary ops preserve the number of sign bits at the worst.
    Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
    if (Tmp != 1) {
      Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
      FirstAnswer = std::min(Tmp, Tmp2);
      // We computed what we know about the sign bits as our first
      // answer. Now proceed to the generic code that uses
      // ComputeMaskedBits, and pick whichever answer is better.
    }
    break;

  case Instruction::Select:
    Tmp = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
    if (Tmp == 1) return 1;  // Early out.
    Tmp2 = ComputeNumSignBits(U->getOperand(2), TD, Depth+1);
    return std::min(Tmp, Tmp2);

  case Instruction::Add:
    // Add can have at most one carry bit. Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
    if (Tmp == 1) return 1;  // Early out.

    // Special case decrementing a value (ADD X, -1):
    if (ConstantInt *CRHS = dyn_cast<ConstantInt>(U->getOperand(1)))
      if (CRHS->isAllOnesValue()) {
        APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
        APInt Mask = APInt::getAllOnesValue(TyBits);
        ComputeMaskedBits(U->getOperand(0), Mask, KnownZero, KnownOne, TD,
                          Depth+1);

        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((KnownZero | APInt(TyBits, 1)) == Mask)
          return TyBits;

        // If we are subtracting one from a positive number, there is no carry
        // out of the result.
        if (KnownZero.isNegative())
          return Tmp;
      }

    Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
    if (Tmp2 == 1) return 1;
    return std::min(Tmp, Tmp2)-1;
    break;

  case Instruction::Sub:
    Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
    if (Tmp2 == 1) return 1;

    // Handle NEG.
    if (ConstantInt *CLHS = dyn_cast<ConstantInt>(U->getOperand(0)))
      if (CLHS->isNullValue()) {
        APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
        APInt Mask = APInt::getAllOnesValue(TyBits);
        ComputeMaskedBits(U->getOperand(1), Mask, KnownZero, KnownOne,
                          TD, Depth+1);
        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((KnownZero | APInt(TyBits, 1)) == Mask)
          return TyBits;

        // If the input is known to be positive (the sign bit is known clear),
        // the output of the NEG has the same number of sign bits as the input.
        if (KnownZero.isNegative())
          return Tmp2;

        // Otherwise, we treat this like a SUB.
      }

    // Sub can have at most one carry bit. Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
    if (Tmp == 1) return 1;  // Early out.
    return std::min(Tmp, Tmp2)-1;
    break;
  case Instruction::Trunc:
    // FIXME: it's tricky to do anything useful for this, but it is an
    // important case for targets like X86.
    break;
  }

  // Finally, if we can prove that the top bits of the result are 0's or 1's,
  // use this information.
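  // For example (hypothetical value), if ComputeMaskedBits shows that the top
  // 8 bits of an i32 are zero, then the sign bit and the 7 bits below it all
  // match, so at least 8 sign bits are reported here.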
  APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
  APInt Mask = APInt::getAllOnesValue(TyBits);
  ComputeMaskedBits(V, Mask, KnownZero, KnownOne, TD, Depth);

  if (KnownZero.isNegative()) {        // sign bit is 0
    Mask = KnownZero;
  } else if (KnownOne.isNegative()) {  // sign bit is 1
    Mask = KnownOne;
  } else {
    // Nothing known.
    return FirstAnswer;
  }

  // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
  // the number of identical bits in the top of the input value.
  Mask = ~Mask;
  Mask <<= Mask.getBitWidth()-TyBits;
  // Return # leading zeros. We use 'min' here in case Val was zero before
  // shifting. We don't want to return '64' as for an i32 "0".
  return std::max(FirstAnswer, std::min(TyBits, Mask.countLeadingZeros()));
}

/// CannotBeNegativeZero - Return true if we can prove that the specified FP
/// value is never equal to -0.0.
///
/// NOTE: this function will need to be revisited when we support non-default
/// rounding modes!
///
bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) {
  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
    return !CFP->getValueAPF().isNegZero();

  if (Depth == 6)
    return 1;  // Limit search depth.

  const Instruction *I = dyn_cast<Instruction>(V);
  if (I == 0) return false;

  // (add x, 0.0) is guaranteed to return +0.0, not -0.0.
  if (I->getOpcode() == Instruction::FAdd &&
      isa<ConstantFP>(I->getOperand(1)) &&
      cast<ConstantFP>(I->getOperand(1))->isNullValue())
    return true;

  // sitofp and uitofp turn into +0.0 for zero.
  if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I))
    return true;

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
    // sqrt(-0.0) = -0.0, no other negative results are possible.
    if (II->getIntrinsicID() == Intrinsic::sqrt)
      return CannotBeNegativeZero(II->getOperand(1), Depth+1);

  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (const Function *F = CI->getCalledFunction()) {
      if (F->isDeclaration()) {
        switch (F->getNameLen()) {
        case 3:  // abs(x) != -0.0
          if (!strcmp(F->getNameStart(), "abs")) return true;
          break;
        case 4:  // abs[lf](x) != -0.0
          if (!strcmp(F->getNameStart(), "absf")) return true;
          if (!strcmp(F->getNameStart(), "absl")) return true;
          break;
        }
      }
    }

  return false;
}

// This is the recursive version of BuildSubAggregate. It takes a few different
// arguments. Idxs is the index within the nested struct From that we are
// looking at now (which is of type IndexedType). IdxSkip is the number of
// indices from Idxs that should be left out when inserting into the resulting
// struct. To is the result struct built so far, new insertvalue instructions
// build on that.
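// For example (hypothetical indices), with Idxs = {1, 1, 0} and IdxSkip = 2,
// the value found at From[1][1][0] is inserted into To at index {0}: the
// first IdxSkip indices name the subaggregate being rebuilt and are dropped.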
Value *BuildSubAggregate(Value *From, Value* To, const Type *IndexedType,
                         SmallVector<unsigned, 10> &Idxs,
                         unsigned IdxSkip,
                         Instruction *InsertBefore) {
  const llvm::StructType *STy = llvm::dyn_cast<llvm::StructType>(IndexedType);
  if (STy) {
    // Save the original To argument so we can modify it
    Value *OrigTo = To;
    // General case, the type indexed by Idxs is a struct
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      // Process each struct element recursively
      Idxs.push_back(i);
      Value *PrevTo = To;
      To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
                             InsertBefore);
      Idxs.pop_back();
      if (!To) {
        // Couldn't find any inserted value for this index? Cleanup
        while (PrevTo != OrigTo) {
          InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
          PrevTo = Del->getAggregateOperand();
          Del->eraseFromParent();
        }
        // Stop processing elements
        break;
      }
    }
    // If we successfully found a value for each of our subaggregates
    if (To)
      return To;
  }
  // Base case, the type indexed by SourceIdxs is not a struct, or not all of
  // the struct's elements had a value that was inserted directly. In the
  // latter case, perhaps we can't determine each of the subelements
  // individually, but we might be able to find the complete struct somewhere.

  // Find the value that is at that particular spot
  Value *V = FindInsertedValue(From, Idxs.begin(), Idxs.end());

  if (!V)
    return NULL;

  // Insert the value in the new (sub) aggregate
  return llvm::InsertValueInst::Create(To, V, Idxs.begin() + IdxSkip,
                                       Idxs.end(), "tmp", InsertBefore);
}

// This helper takes a nested struct and extracts a part of it (which is again
// a struct) into a new value. For example, given the struct:
// { a, { b, { c, d }, e } }
// and the indices "1, 1" this returns
// { c, d }.
//
// It does this by inserting an insertvalue for each element in the resulting
// struct, as opposed to just inserting a single struct. This will only work if
// each of the elements of the substruct is known (i.e. inserted into From by
// an insertvalue instruction somewhere).
//
// All inserted insertvalue instructions are inserted before InsertBefore
Value *BuildSubAggregate(Value *From, const unsigned *idx_begin,
                         const unsigned *idx_end, Instruction *InsertBefore) {
  assert(InsertBefore && "Must have someplace to insert!");
  const Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
                                                             idx_begin,
                                                             idx_end);
  Value *To = UndefValue::get(IndexedType);
  SmallVector<unsigned, 10> Idxs(idx_begin, idx_end);
  unsigned IdxSkip = Idxs.size();

  return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
}

/// FindInsertedValue - Given an aggregate and a sequence of indices, see if
/// the scalar value indexed is already around as a register, for example if
/// it were inserted directly into the aggregate.
///
/// If InsertBefore is not null, this function will duplicate (modified)
/// insertvalues when a part of a nested struct is extracted.
Value *llvm::FindInsertedValue(Value *V, const unsigned *idx_begin,
                               const unsigned *idx_end,
                               Instruction *InsertBefore) {
  // Nothing to index? Just return V then (this is useful at the end of our
  // recursion).
  if (idx_begin == idx_end)
    return V;
  // We have indices, so V should have an indexable type
  assert((isa<StructType>(V->getType()) || isa<ArrayType>(V->getType()))
         && "Not looking at a struct or array?");
  assert(ExtractValueInst::getIndexedType(V->getType(), idx_begin, idx_end)
         && "Invalid indices for type?");
  const CompositeType *PTy = cast<CompositeType>(V->getType());

  if (isa<UndefValue>(V))
    return UndefValue::get(ExtractValueInst::getIndexedType(PTy,
                                                            idx_begin,
                                                            idx_end));
  else if (isa<ConstantAggregateZero>(V))
    return Constant::getNullValue(ExtractValueInst::getIndexedType(PTy,
                                                                   idx_begin,
                                                                   idx_end));
  else if (Constant *C = dyn_cast<Constant>(V)) {
    if (isa<ConstantArray>(C) || isa<ConstantStruct>(C))
      // Recursively process this constant
      return FindInsertedValue(C->getOperand(*idx_begin), idx_begin + 1,
                               idx_end, InsertBefore);
  } else if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
    // Loop the indices for the insertvalue instruction in parallel with the
    // requested indices
    const unsigned *req_idx = idx_begin;
    for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
         i != e; ++i, ++req_idx) {
      if (req_idx == idx_end) {
        if (InsertBefore)
          // The requested index identifies a part of a nested aggregate.
          // Handle this specially. For example,
          // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
          // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
          // %C = extractvalue {i32, { i32, i32 } } %B, 1
          // This can be changed into
          // %A = insertvalue {i32, i32 } undef, i32 10, 0
          // %C = insertvalue {i32, i32 } %A, i32 11, 1
          // which allows the unused 0,0 element from the nested struct to be
          // removed.
          return BuildSubAggregate(V, idx_begin, req_idx, InsertBefore);
        else
          // We can't handle this without inserting insertvalues
          return 0;
      }

      // This insertvalue inserts something other than what we are looking
      // for. See if the (aggregate) value inserted into has the value we are
      // looking for, then.
      if (*req_idx != *i)
        return FindInsertedValue(I->getAggregateOperand(), idx_begin, idx_end,
                                 InsertBefore);
    }
    // If we end up here, the indices of the insertvalue match with those
    // requested (though possibly only partially). Now we recursively look at
    // the inserted value, passing any remaining indices.
    return FindInsertedValue(I->getInsertedValueOperand(), req_idx, idx_end,
                             InsertBefore);
  } else if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
    // If we're extracting a value from an aggregate that was extracted from
    // something else, we can extract from that something else directly
    // instead. However, we will need to chain I's indices with the requested
    // indices.
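    // For example (hypothetical IR), if I is
    //   %X = extractvalue {{i32, i32}, i32} %A, 0
    // and we are asked for index 1 of %X, we instead look for indices 0, 1
    // of %A.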

    // Calculate the number of indices required
    unsigned size = I->getNumIndices() + (idx_end - idx_begin);
    // Allocate some space to put the new indices in
    SmallVector<unsigned, 5> Idxs;
    Idxs.reserve(size);
    // Add indices from the extract value instruction
    for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
         i != e; ++i)
      Idxs.push_back(*i);

    // Add requested indices
    for (const unsigned *i = idx_begin, *e = idx_end; i != e; ++i)
      Idxs.push_back(*i);

    assert(Idxs.size() == size
           && "Number of indices added not correct?");

    return FindInsertedValue(I->getAggregateOperand(), Idxs.begin(), Idxs.end(),
                             InsertBefore);
  }
  // Otherwise, we don't know (such as, extracting from a function return value
  // or load instruction)
  return 0;
}

/// GetConstantStringInfo - This function computes the length of a
/// null-terminated C string pointed to by V. If successful, it returns true
/// and returns the string in Str. If unsuccessful, it returns false.
bool llvm::GetConstantStringInfo(Value *V, std::string &Str, uint64_t Offset,
                                 bool StopAtNul) {
  // If V is NULL then return false.
  if (V == NULL) return false;

  // Look through bitcast instructions.
  if (BitCastInst *BCI = dyn_cast<BitCastInst>(V))
    return GetConstantStringInfo(BCI->getOperand(0), Str, Offset, StopAtNul);

  // If the value is not a GEP instruction nor a constant expression with a
  // GEP instruction, then return false because ConstantArray can't occur
  // any other way
  User *GEP = 0;
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(V)) {
    GEP = GEPI;
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::BitCast)
      return GetConstantStringInfo(CE->getOperand(0), Str, Offset, StopAtNul);
    if (CE->getOpcode() != Instruction::GetElementPtr)
      return false;
    GEP = CE;
  }

  if (GEP) {
    // Make sure the GEP has exactly three arguments.
    if (GEP->getNumOperands() != 3)
      return false;

    // Make sure the index-ee is a pointer to array of i8.
    const PointerType *PT = cast<PointerType>(GEP->getOperand(0)->getType());
    const ArrayType *AT = dyn_cast<ArrayType>(PT->getElementType());
    if (AT == 0 || AT->getElementType() != Type::Int8Ty)
      return false;

    // Check to make sure that the first operand of the GEP is an integer and
    // has value 0 so that we are sure we're indexing into the initializer.
    ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
    if (FirstIdx == 0 || !FirstIdx->isZero())
      return false;

    // If the second index isn't a ConstantInt, then this is a variable index
    // into the array. If this occurs, we can't say anything meaningful about
    // the string.
    uint64_t StartIdx = 0;
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
      StartIdx = CI->getZExtValue();
    else
      return false;
    return GetConstantStringInfo(GEP->getOperand(0), Str, StartIdx+Offset,
                                 StopAtNul);
  }

  // The GEP instruction, constant or instruction, must reference a global
  // variable that is a constant and is initialized. The referenced constant
  // initializer is the array that we'll use for optimization.
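  // A typical (hypothetical) pattern is a constant-expression GEP such as
  //   getelementptr ([6 x i8]* @.str, i32 0, i32 0)
  // where @.str = constant [6 x i8] c"hello\00"; the initializer of @.str is
  // the array the string is read out of.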
  GlobalVariable* GV = dyn_cast<GlobalVariable>(V);
  if (!GV || !GV->isConstant() || !GV->hasInitializer())
    return false;
  Constant *GlobalInit = GV->getInitializer();

  // Handle the ConstantAggregateZero case
  if (isa<ConstantAggregateZero>(GlobalInit)) {
    // This is a degenerate case. The initializer is constant zero so the
    // length of the string must be zero.
    Str.clear();
    return true;
  }

  // Must be a Constant Array
  ConstantArray *Array = dyn_cast<ConstantArray>(GlobalInit);
  if (Array == 0 || Array->getType()->getElementType() != Type::Int8Ty)
    return false;

  // Get the number of elements in the array
  uint64_t NumElts = Array->getType()->getNumElements();

  if (Offset > NumElts)
    return false;

  // Traverse the constant array from 'Offset' which is the place the GEP
  // refers to in the array.
  Str.reserve(NumElts-Offset);
  for (unsigned i = Offset; i != NumElts; ++i) {
    Constant *Elt = Array->getOperand(i);
    ConstantInt *CI = dyn_cast<ConstantInt>(Elt);
    if (!CI) // This array isn't suitable, non-int initializer.
      return false;
    if (StopAtNul && CI->isZero())
      return true; // we found end of string, success!
    Str += (char)CI->getZExtValue();
  }

  // The array isn't null terminated, but maybe this is a memcpy, not a strcpy.
  return true;
}
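
// Usage sketch (illustrative only; V, Mask, and TD are hypothetical caller
// values): a client pass that wants to know whether an i32 Value is a
// multiple of 4 can ask about its two low bits, e.g.
//
//   APInt Mask = APInt::getLowBitsSet(32, 2);         // bits 0 and 1
//   if (MaskedValueIsZero(V, Mask, TD /*, Depth=0*/)) // TD may be null
//     ...;                                            // V is a multiple of 4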