Lines Matching defs:Offset

356 void insertUse(Instruction &I, const APInt &Offset, uint64_t Size,
360 if (Size == 0 || Offset.uge(AllocSize)) {
361 DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @" << Offset
369 uint64_t BeginOffset = Offset.getZExtValue();
380 DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" << Offset
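
The two WARNING lines above are the two out-of-bounds cases insertUse guards against: a use that is empty or starts at or past the end of the alloca is ignored outright (line 360), and a use that starts in bounds but runs past the end is clamped (line 380). A minimal standalone sketch of that logic, with plain uint64_t standing in for APInt and the slice recording elided:

#include <cstdint>
#include <cstdio>

// Illustrative stand-in for the insertUse bounds handling matched above.
// AllocSize, Offset, and Size play the same roles as in the pass; the
// real code records a slice instead of printing it.
void insertUseSketch(uint64_t AllocSize, uint64_t Offset, uint64_t Size) {
  if (Size == 0 || Offset >= AllocSize) {
    std::printf("WARNING: Ignoring %llu byte use @%llu\n",
                (unsigned long long)Size, (unsigned long long)Offset);
    return; // dead or fully out-of-bounds: no slice is recorded
  }
  uint64_t BeginOffset = Offset; // cf. line 369
  uint64_t EndOffset = BeginOffset + Size;
  if (EndOffset > AllocSize) {
    std::printf("WARNING: Clamping a %llu byte use @%llu\n",
                (unsigned long long)Size, (unsigned long long)Offset);
    EndOffset = AllocSize; // keep only the in-bounds prefix of the use
  }
  std::printf("slice [%llu, %llu)\n", (unsigned long long)BeginOffset,
              (unsigned long long)EndOffset);
}
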
409 APInt GEPOffset = Offset;
422 APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx));
425 APInt Index = OpC->getValue().sextOrTrunc(Offset.getBitWidth());
426 GEPOffset += Index * APInt(Offset.getBitWidth(),
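
Lines 409-426 accumulate a GEP's constant byte offset one operand at a time: a struct index contributes its field offset from the StructLayout (line 422), while an array or vector index contributes index times element size, with the index brought to the offset's bit width via sextOrTrunc (lines 425-426). A hedged sketch of the same accumulation over a simplified step list (the GEPStep type is invented for illustration; the pass walks the GEP's operands with gep_type_begin):

#include <cstdint>
#include <vector>

// One logical GEP operand: either a struct field with a known byte
// offset, or an array/vector index scaled by the element's alloc size.
struct GEPStep {
  bool IsStructField;
  uint64_t FieldByteOffset; // from StructLayout::getElementOffset
  int64_t Index;            // array/vector index; may be negative
  uint64_t ElementSize;     // from DataLayout::getTypeAllocSize
};

// Fold every step into a single byte offset, as lines 409-426 do with
// APInt arithmetic at the offset's bit width.
int64_t accumulateGEPOffset(int64_t GEPOffset,
                            const std::vector<GEPStep> &Steps) {
  for (const GEPStep &S : Steps)
    GEPOffset += S.IsStructField ? (int64_t)S.FieldByteOffset
                                 : S.Index * (int64_t)S.ElementSize;
  return GEPOffset;
}
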
441 void handleLoadOrStore(Type *Ty, Instruction &I, const APInt &Offset,
452 Ty->isIntegerTy() && !IsVolatile && Offset == 0 && Size >= AllocSize;
454 insertUse(I, Offset, Size, IsSplittable);
465 return handleLoadOrStore(LI.getType(), LI, Offset, Size, LI.isVolatile());
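
Line 452 is the splittability heuristic for loads and stores: only a non-volatile integer access that starts at offset zero and spans the entire alloca is marked splittable, since such a whole-alloca integer can later be rebuilt from narrower pieces. As a standalone predicate (parameter names invented for illustration):

#include <cstdint>

// The test from line 452: whole-alloca, non-volatile integer accesses
// are the only loads/stores recorded as splittable.
bool isSplittableAccess(bool IsIntegerTy, bool IsVolatile, uint64_t Offset,
                        uint64_t Size, uint64_t AllocSize) {
  return IsIntegerTy && !IsVolatile && Offset == 0 && Size >= AllocSize;
}
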
484 if (Size > AllocSize || Offset.ugt(AllocSize - Size)) {
485 DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte store @" << Offset
495 handleLoadOrStore(ValOp->getType(), SI, Offset, Size, SI.isVolatile());
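
The bounds check at line 484 is written carefully: Size > AllocSize || Offset.ugt(AllocSize - Size) never overflows, whereas the naive Offset + Size > AllocSize can wrap and wrongly accept a wild store. A small self-contained demonstration with plain uint64_t in place of APInt:

#include <cassert>
#include <cstdint>

// "Does [Offset, Offset + Size) escape an alloca of AllocSize bytes?"
bool escapesNaive(uint64_t AllocSize, uint64_t Offset, uint64_t Size) {
  return Offset + Size > AllocSize; // wraps when Offset + Size overflows
}
bool escapesSafe(uint64_t AllocSize, uint64_t Offset, uint64_t Size) {
  return Size > AllocSize || Offset > AllocSize - Size; // cf. line 484
}

int main() {
  uint64_t Alloc = 16, Off = ~0ULL - 3, Sz = 8; // Off + Sz wraps to 4
  assert(!escapesNaive(Alloc, Off, Sz)); // the naive form is fooled
  assert(escapesSafe(Alloc, Off, Sz));   // the safe form catches it
}
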
503 (IsOffsetKnown && Offset.uge(AllocSize)))
510 insertUse(II, Offset,
512 : AllocSize - Offset.getLimitedValue(),
535 if (Offset.uge(AllocSize)) {
542 uint64_t RawOffset = Offset.getLimitedValue();
553 return insertUse(II, Offset, Size, /*IsSplittable=*/false);
579 insertUse(II, Offset, Size, /*IsSplittable=*/Inserted && Length);
596 uint64_t Size = std::min(AllocSize - Offset.getLimitedValue(),
598 insertUse(II, Offset, Size, true);
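
For the memory intrinsics, the recorded use size comes from the intrinsic's length operand when it is a known constant and from the remainder of the alloca otherwise, clamped either way so the slice never extends past the allocation (lines 510-512 and 596). Sketched with plain integers (HasLength/Length stand in for the ConstantInt length operand):

#include <algorithm>
#include <cstdint>

// Size of the slice recorded for a memset/memcpy-style intrinsic,
// mirroring lines 510-512 and 596: a known length is clamped to the
// bytes remaining in the alloca; an unknown length covers all of them.
uint64_t intrinsicUseSize(uint64_t AllocSize, uint64_t Offset,
                          bool HasLength, uint64_t Length) {
  uint64_t Remaining = AllocSize - Offset; // Offset < AllocSize was checked
  return HasLength ? std::min(Length, Remaining) : Remaining;
}
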
669 if (Offset.uge(AllocSize)) {
674 insertUse(PN, Offset, PHISize);
709 if (Offset.uge(AllocSize)) {
714 insertUse(SI, Offset, SelectSize);
1330 Value *Ptr, Type *Ty, APInt &Offset,
1334 if (Offset == 0)
1350 APInt ElementSize(Offset.getBitWidth(), ElementSizeInBits / 8);
1351 APInt NumSkippedElements = Offset.sdiv(ElementSize);
1354 Offset -= NumSkippedElements * ElementSize;
1357 Offset, TargetTy, Indices, NamePrefix);
1362 APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
1363 APInt NumSkippedElements = Offset.sdiv(ElementSize);
1367 Offset -= NumSkippedElements * ElementSize;
1369 return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
1378 uint64_t StructOffset = Offset.getZExtValue();
1382 Offset -= APInt(Offset.getBitWidth(), SL->getElementOffset(Index));
1384 if (Offset.uge(DL.getTypeAllocSize(ElementTy)))
1388 return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
1398 /// possible. We recurse by decreasing Offset, adding the appropriate index to
1403 Value *Ptr, APInt Offset, Type *TargetTy,
1416 APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
1419 APInt NumSkippedElements = Offset.sdiv(ElementSize);
1421 Offset -= NumSkippedElements * ElementSize;
1423 return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
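
The getNaturalGEP* recursion (lines 1330-1423) peels one type level off the remaining byte offset per step: for sequential types it divides by the element size to get an index and subtracts what that index accounts for (lines 1351-1354, 1363-1367, 1419-1421); for structs it finds the field containing the offset and subtracts that field's offset (lines 1378-1382), failing if the offset lands in padding (line 1384). A standalone sketch over a toy type model (TypeNode and its fields are invented; the pass uses llvm::Type plus DataLayout):

#include <cstdint>
#include <vector>

// Toy stand-in for llvm::Type + DataLayout, invented for illustration.
struct TypeNode {
  enum { Scalar, Array, Struct } Kind;
  uint64_t Size;                        // alloc size in bytes
  const TypeNode *Element = nullptr;    // Array: element type
  std::vector<const TypeNode *> Fields; // Struct: field types
  std::vector<uint64_t> FieldOffsets;   // Struct: field byte offsets
};

// Append natural GEP indices to Indices until Offset is consumed.
// Mirrors the divide / subtract / recurse shape of lines 1350-1388.
bool naturalIndices(const TypeNode *Ty, uint64_t Offset,
                    std::vector<uint64_t> &Indices) {
  if (Offset == 0)
    return true; // landed exactly on Ty, as at line 1334
  if (Ty->Kind == TypeNode::Array) {
    uint64_t ElementSize = Ty->Element->Size;
    uint64_t Index = Offset / ElementSize; // NumSkippedElements
    Indices.push_back(Index);
    return naturalIndices(Ty->Element, Offset - Index * ElementSize, Indices);
  }
  if (Ty->Kind == TypeNode::Struct) {
    // Last field whose offset is <= Offset (StructLayout's
    // getElementContainingOffset does this for real).
    size_t I = Ty->Fields.size();
    while (I > 0 && Ty->FieldOffsets[I - 1] > Offset)
      --I;
    if (I == 0)
      return false;
    --I;
    uint64_t Rem = Offset - Ty->FieldOffsets[I];
    if (Rem >= Ty->Fields[I]->Size)
      return false; // offset points into padding, as at line 1384
    Indices.push_back(I);
    return naturalIndices(Ty->Fields[I], Rem, Indices);
  }
  return false; // nonzero offset into a scalar: no natural GEP exists
}
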
1427 /// \brief Compute an adjusted pointer from Ptr by Offset bytes where the
1443 APInt Offset, Type *PointerTy,
1459 APInt Int8PtrOffset(Offset.getBitWidth(), 0);
1466 APInt GEPOffset(Offset.getBitWidth(), 0);
1469 Offset += GEPOffset;
1477 if (Value *P = getNaturalGEPWithOffset(IRB, DL, Ptr, Offset, TargetTy,
1494 Int8PtrOffset = Offset;
1515 Int8PtrOffset = Offset;
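
getAdjustedPtr (lines 1443-1515) first folds any constant offsets already carried by the pointer's own GEP chain into Offset (lines 1466-1469), then tries to express the adjustment as a natural, typed GEP (line 1477); when no natural GEP exists it falls back to flat byte arithmetic, remembering the amount in Int8PtrOffset (lines 1494 and 1515) for an i8-typed GEP. A heavily condensed sketch of that two-tier strategy (Ptr and both helpers are invented stand-ins for IRBuilder-emitted values):

#include <cstdint>
#include <string>

struct Ptr { std::string Name; }; // stand-in for an llvm::Value pointer

// Pretend a typed GEP is only expressible for 8-byte-aligned offsets;
// the real test is whether Offset decomposes along the type tree.
static bool tryNaturalGEP(const Ptr &P, uint64_t Offset, Ptr &Out) {
  if (Offset % 8 != 0)
    return false;
  Out = {P.Name + ".gep" + std::to_string(Offset / 8)};
  return true;
}

static Ptr emitByteGEP(const Ptr &P, uint64_t Offset) {
  return {P.Name + ".i8+" + std::to_string(Offset)};
}

// Fold known constant offsets, prefer a typed GEP, fall back to bytes.
Ptr getAdjustedPtrSketch(Ptr P, uint64_t Offset, uint64_t KnownGEPOffset) {
  Offset += KnownGEPOffset; // cf. lines 1466-1469
  Ptr Natural;
  if (tryNaturalGEP(P, Offset, Natural)) // cf. line 1477
    return Natural;
  uint64_t Int8PtrOffset = Offset; // cf. lines 1494 and 1515
  return emitByteGEP(P, Int8PtrOffset);
}
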
1852 IntegerType *Ty, uint64_t Offset,
1856 assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) &&
1858 uint64_t ShAmt = 8*Offset;
1860 ShAmt = 8*(DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset);
1875 Value *V, uint64_t Offset, const Twine &Name) {
1885 assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) &&
1887 uint64_t ShAmt = 8*Offset;
1889 ShAmt = 8*(DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset);
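
extractInteger and insertInteger (lines 1852-1889) convert a byte offset into a bit-shift amount: 8*Offset on little-endian targets, where low memory holds low bits, and the mirrored 8*(StoreSize(IntTy) - StoreSize(Ty) - Offset) on big-endian targets, where the first bytes are the most significant. A host-side demonstration of the extract direction on plain integers:

#include <cassert>
#include <cstdint>

// Extract the SubBytes-wide integer located SubOffset bytes into a
// WholeBytes-wide integer, using the shift rule of lines 1858-1860.
uint64_t extractSubInt(uint64_t Whole, unsigned WholeBytes,
                       unsigned SubBytes, unsigned SubOffset,
                       bool BigEndian) {
  assert(SubBytes + SubOffset <= WholeBytes && "element is out of bounds");
  unsigned ShAmt = 8 * SubOffset;
  if (BigEndian)
    ShAmt = 8 * (WholeBytes - SubBytes - SubOffset);
  uint64_t Mask = SubBytes == 8 ? ~0ULL : ((1ULL << (8 * SubBytes)) - 1);
  return (Whole >> ShAmt) & Mask; // lshr, then trunc, as in the pass
}

int main() {
  // The 2-byte value at byte offset 0 of the 4-byte integer 0xAABBCCDD
  // is its low half on little-endian, its high half on big-endian:
  assert(extractSubInt(0xAABBCCDD, 4, 2, 0, false) == 0xCCDD);
  assert(extractSubInt(0xAABBCCDD, 4, 2, 0, true) == 0xAABB);
}
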
2116 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2140 APInt(DL.getPointerSizeInBits(), Offset), PointerTy,
2161 unsigned getIndex(uint64_t Offset) {
2163 uint64_t RelOffset = Offset - NewAllocaBeginOffset;
2193 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2194 if (Offset > 0 || NewEndOffset < NewAllocaEndOffset)
2195 V = extractInteger(DL, IRB, V, cast<IntegerType>(LI.getType()), Offset,
2292 uint64_t Offset = BeginOffset - NewAllocaBeginOffset;
2293 V = insertInteger(DL, IRB, Old, SI.getValueOperand(), Offset,
2468 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2469 V = insertInteger(DL, IRB, Old, V, Offset, "insert");
2639 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2640 Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract");
2654 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2655 Src = insertInteger(DL, IRB, Old, Src, Offset, "insert");
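
Every Offset computed in the rewriter (lines 2116, 2193, 2292, 2468, 2639, 2654) is the same rebasing step: NewBeginOffset is absolute within the original alloca, and subtracting NewAllocaBeginOffset yields the position relative to the new, smaller alloca the slice now lives in. A partial extract or insert is only needed when the access fails to cover the whole new alloca, which is exactly the test at line 2194. Condensed (RewriteFrame is an invented holder for state the rewriter keeps as members):

#include <cstdint>

struct RewriteFrame {
  uint64_t NewAllocaBeginOffset; // absolute start of the new alloca
  uint64_t NewAllocaEndOffset;   // absolute end of the new alloca
};

// Rebase an absolute access range into the new alloca's frame and
// report whether a partial extract/insert is required (cf. line 2194).
uint64_t relativeOffset(const RewriteFrame &F, uint64_t NewBeginOffset,
                        uint64_t NewEndOffset, bool &NeedsPartial) {
  uint64_t Offset = NewBeginOffset - F.NewAllocaBeginOffset;
  NeedsPartial = Offset > 0 || NewEndOffset < F.NewAllocaEndOffset;
  return Offset;
}
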
2996 uint64_t Offset, uint64_t Size) {
2997 if (Offset == 0 && DL.getTypeAllocSize(Ty) == Size)
2999 if (Offset > DL.getTypeAllocSize(Ty) ||
3000 (DL.getTypeAllocSize(Ty) - Offset) < Size)
3010 uint64_t NumSkippedElements = Offset / ElementSize;
3018 Offset -= NumSkippedElements * ElementSize;
3021 if (Offset > 0 || Size < ElementSize) {
3023 if ((Offset + Size) > ElementSize)
3026 return getTypePartition(DL, ElementTy, Offset, Size);
3028 assert(Offset == 0);
3044 if (Offset >= SL->getSizeInBytes())
3046 uint64_t EndOffset = Offset + Size;
3050 unsigned Index = SL->getElementContainingOffset(Offset);
3051 Offset -= SL->getElementOffset(Index);
3055 if (Offset >= ElementSize)
3059 if (Offset > 0 || Size < ElementSize) {
3060 if ((Offset + Size) > ElementSize)
3062 return getTypePartition(DL, ElementTy, Offset, Size);
3064 assert(Offset == 0);
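
getTypePartition (lines 2996-3064) searches the type tree for a subtype covering exactly [Offset, Offset + Size): an exact match succeeds immediately (line 2997), a range that escapes the type fails (lines 2999-3000), arrays are entered by dividing out the element size (lines 3010-3026), and structs by the field containing the offset (lines 3050-3062); a range that straddles an element boundary fails. A sketch of that control flow over the same toy TypeNode model used in the natural-GEP example, repeated here so the block stands alone:

#include <cstdint>
#include <vector>

// Toy stand-in for llvm::Type, invented for illustration.
struct TypeNode {
  enum { Scalar, Array, Struct } Kind;
  uint64_t Size;
  const TypeNode *Element = nullptr;
  std::vector<const TypeNode *> Fields;
  std::vector<uint64_t> FieldOffsets;
};

// Find a subtype of Ty exactly covering [Offset, Offset + Size), or
// nullptr. Mirrors lines 2996-3064; the array case where the range
// spans several whole elements (the pass builds a new array type
// there) is elided.
const TypeNode *getTypePartitionSketch(const TypeNode *Ty, uint64_t Offset,
                                       uint64_t Size) {
  if (Offset == 0 && Ty->Size == Size)
    return Ty;                                      // line 2997
  if (Offset > Ty->Size || Ty->Size - Offset < Size)
    return nullptr;                                 // lines 2999-3000
  if (Ty->Kind == TypeNode::Array) {
    uint64_t ElementSize = Ty->Element->Size;
    Offset -= (Offset / ElementSize) * ElementSize; // lines 3010, 3018
    if (Offset > 0 || Size < ElementSize) {
      if (Offset + Size > ElementSize)
        return nullptr;                             // straddles elements
      return getTypePartitionSketch(Ty->Element, Offset, Size); // 3026
    }
    return Size == ElementSize ? Ty->Element : nullptr;
  }
  if (Ty->Kind == TypeNode::Struct) {
    size_t I = Ty->Fields.size(); // find the field containing Offset
    while (I > 0 && Ty->FieldOffsets[I - 1] > Offset)
      --I;
    if (I == 0)
      return nullptr;
    --I;
    Offset -= Ty->FieldOffsets[I];                  // line 3051
    const TypeNode *ElementTy = Ty->Fields[I];
    if (Offset >= ElementTy->Size)
      return nullptr;                               // padding, line 3055
    if (Offset > 0 || Size < ElementTy->Size) {
      if (Offset + Size > ElementTy->Size)
        return nullptr;
      return getTypePartitionSketch(ElementTy, Offset, Size); // line 3062
    }
    return ElementTy;
  }
  return nullptr;
}
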
3260 uint64_t &MaxSplitUseEndOffset, uint64_t Offset) {
3261 if (Offset >= MaxSplitUseEndOffset) {
3269 [Offset](const AllocaSlices::iterator &I) {
3270 return I->endOffset() <= Offset;
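
The final matches (lines 3260-3270) prune finished split uses: once the partitioning sweep reaches Offset, any split slice whose end is at or before Offset can no longer intersect later partitions, and MaxSplitUseEndOffset lets the whole list be dropped at once when every slice has ended. A sketch in terms of std::remove_if (Slice here is a minimal stand-in for the pass's slice type):

#include <algorithm>
#include <cstdint>
#include <vector>

struct Slice {
  uint64_t Begin, End;
  uint64_t endOffset() const { return End; }
};

// Drop split uses that end at or before Offset, in the spirit of the
// lambda at lines 3269-3270; the early exit mirrors line 3261.
void pruneSplitUses(std::vector<Slice> &SplitUses, uint64_t Offset,
                    uint64_t MaxSplitUseEndOffset) {
  if (Offset >= MaxSplitUseEndOffset) {
    SplitUses.clear(); // every split use has already ended
    return;
  }
  SplitUses.erase(std::remove_if(SplitUses.begin(), SplitUses.end(),
                                 [Offset](const Slice &S) {
                                   return S.endOffset() <= Offset;
                                 }),
                  SplitUses.end());
}
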