SROA.cpp revision dce4a407a24b04eebc6a376f8e62b41aaa7b071f
//===- SROA.cpp - Scalar Replacement Of Aggregates ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This transformation implements the well known scalar replacement of
/// aggregates transformation. It tries to identify promotable elements of an
/// aggregate alloca, and promote them to registers. It will also try to
/// convert uses of an element (or set of elements) of an alloca into a vector
/// or bitfield-style integer scalar if appropriate.
///
/// It works to do this with minimal slicing of the alloca so that regions
/// which are merely transferred in and out of external memory remain unchanged
/// and are not decomposed to scalar code.
///
/// Because this also performs alloca promotion, it can be thought of as also
/// serving the purpose of SSA formation. The algorithm iterates on the
/// function until all opportunities for promotion have been realized.
///
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TimeValue.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"

#if __cplusplus >= 201103L && !defined(NDEBUG)
// We only use this for a debug check in C++11
#include <random>
#endif

using namespace llvm;

#define DEBUG_TYPE "sroa"

STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement");
STATISTIC(NumAllocaPartitions, "Number of alloca partitions formed");
STATISTIC(MaxPartitionsPerAlloca, "Maximum number of partitions per alloca");
STATISTIC(NumAllocaPartitionUses, "Number of alloca partition uses rewritten");
STATISTIC(MaxUsesPerAllocaPartition, "Maximum number of uses of a partition");
STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced");
STATISTIC(NumPromoted, "Number of allocas promoted to SSA values");
STATISTIC(NumLoadsSpeculated, "Number of loads speculated to allow promotion");
STATISTIC(NumDeleted, "Number of instructions deleted");
STATISTIC(NumVectorized, "Number of vectorized aggregates");

/// Hidden option to force the pass to not use DomTree and mem2reg, instead
/// forming SSA values through the SSAUpdater infrastructure.
static cl::opt<bool>
ForceSSAUpdater("force-ssa-updater", cl::init(false), cl::Hidden);

/// Hidden option to enable randomly shuffling the slices to help uncover
/// instability in their order.
static cl::opt<bool> SROARandomShuffleSlices("sroa-random-shuffle-slices",
                                             cl::init(false), cl::Hidden);

/// Hidden option to experiment with completely strict handling of inbounds
/// GEPs.
static cl::opt<bool> SROAStrictInbounds("sroa-strict-inbounds",
                                        cl::init(false), cl::Hidden);

namespace {
/// \brief A custom IRBuilder inserter which prefixes all names if they are
/// preserved.
template <bool preserveNames = true>
class IRBuilderPrefixedInserter
    : public IRBuilderDefaultInserter<preserveNames> {
  std::string Prefix;

public:
  void SetNamePrefix(const Twine &P) { Prefix = P.str(); }

protected:
  void InsertHelper(Instruction *I, const Twine &Name, BasicBlock *BB,
                    BasicBlock::iterator InsertPt) const {
    IRBuilderDefaultInserter<preserveNames>::InsertHelper(
        I, Name.isTriviallyEmpty() ? Name : Prefix + Name, BB, InsertPt);
  }
};

// Specialization for not preserving the name is trivial.
template <>
class IRBuilderPrefixedInserter<false>
    : public IRBuilderDefaultInserter<false> {
public:
  void SetNamePrefix(const Twine &P) {}
};

/// \brief Provide a typedef for IRBuilder that drops names in release builds.
#ifndef NDEBUG
typedef llvm::IRBuilder<true, ConstantFolder,
                        IRBuilderPrefixedInserter<true> > IRBuilderTy;
#else
typedef llvm::IRBuilder<false, ConstantFolder,
                        IRBuilderPrefixedInserter<false> > IRBuilderTy;
#endif
}

namespace {
/// \brief A used slice of an alloca.
///
/// This structure represents a slice of an alloca used by some instruction. It
/// stores both the begin and end offsets of this use, a pointer to the use
/// itself, and a flag indicating whether we can classify the use as splittable
/// or not when forming partitions of the alloca.
class Slice {
  /// \brief The beginning offset of the range.
  uint64_t BeginOffset;

  /// \brief The ending offset, not included in the range.
  uint64_t EndOffset;

  /// \brief Storage for both the use of this slice and whether it can be
  /// split.
  PointerIntPair<Use *, 1, bool> UseAndIsSplittable;

public:
  Slice() : BeginOffset(), EndOffset() {}
  Slice(uint64_t BeginOffset, uint64_t EndOffset, Use *U, bool IsSplittable)
      : BeginOffset(BeginOffset), EndOffset(EndOffset),
        UseAndIsSplittable(U, IsSplittable) {}

  uint64_t beginOffset() const { return BeginOffset; }
  uint64_t endOffset() const { return EndOffset; }

  bool isSplittable() const { return UseAndIsSplittable.getInt(); }
  void makeUnsplittable() { UseAndIsSplittable.setInt(false); }

  Use *getUse() const { return UseAndIsSplittable.getPointer(); }

  bool isDead() const { return getUse() == nullptr; }
  void kill() { UseAndIsSplittable.setPointer(nullptr); }

  /// \brief Support for ordering ranges.
  ///
  /// This provides an ordering over ranges such that start offsets are
  /// always increasing, and within equal start offsets, the end offsets are
  /// decreasing. Thus the spanning range comes first in a cluster with the
  /// same start position.
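  ///
  /// For example, slices [0,8), [4,12), and two slices covering [0,16) sort
  /// as [0,16), [0,16), [0,8), [4,12), with an unsplittable [0,16) slice
  /// ordered before a splittable one.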
  bool operator<(const Slice &RHS) const {
    if (beginOffset() < RHS.beginOffset()) return true;
    if (beginOffset() > RHS.beginOffset()) return false;
    if (isSplittable() != RHS.isSplittable()) return !isSplittable();
    if (endOffset() > RHS.endOffset()) return true;
    return false;
  }

  /// \brief Support comparison with a single offset to allow binary searches.
  friend LLVM_ATTRIBUTE_UNUSED bool operator<(const Slice &LHS,
                                              uint64_t RHSOffset) {
    return LHS.beginOffset() < RHSOffset;
  }
  friend LLVM_ATTRIBUTE_UNUSED bool operator<(uint64_t LHSOffset,
                                              const Slice &RHS) {
    return LHSOffset < RHS.beginOffset();
  }

  bool operator==(const Slice &RHS) const {
    return isSplittable() == RHS.isSplittable() &&
           beginOffset() == RHS.beginOffset() &&
           endOffset() == RHS.endOffset();
  }
  bool operator!=(const Slice &RHS) const { return !operator==(RHS); }
};
} // end anonymous namespace

namespace llvm {
template <typename T> struct isPodLike;
template <> struct isPodLike<Slice> {
  static const bool value = true;
};
}

namespace {
/// \brief Representation of the alloca slices.
///
/// This class represents the slices of an alloca which are formed by its
/// various uses. If a pointer escapes, we can't fully build a representation
/// for the slices used and we reflect that in this structure. The uses are
/// stored, sorted by increasing beginning offset and with unsplittable slices
/// starting at a particular offset before splittable slices.
class AllocaSlices {
public:
  /// \brief Construct the slices of a particular alloca.
  AllocaSlices(const DataLayout &DL, AllocaInst &AI);

  /// \brief Test whether a pointer to the allocation escapes our analysis.
  ///
  /// If this is true, the slices are never fully built and should be
  /// ignored.
  bool isEscaped() const { return PointerEscapingInstr; }

  /// \brief Support for iterating over the slices.
  /// @{
  typedef SmallVectorImpl<Slice>::iterator iterator;
  iterator begin() { return Slices.begin(); }
  iterator end() { return Slices.end(); }

  typedef SmallVectorImpl<Slice>::const_iterator const_iterator;
  const_iterator begin() const { return Slices.begin(); }
  const_iterator end() const { return Slices.end(); }
  /// @}

  /// \brief Allow iterating the dead users for this alloca.
  ///
  /// These are instructions which will never actually use the alloca as they
  /// are outside the allocated range. They are safe to replace with undef and
  /// delete.
  /// @{
  typedef SmallVectorImpl<Instruction *>::const_iterator dead_user_iterator;
  dead_user_iterator dead_user_begin() const { return DeadUsers.begin(); }
  dead_user_iterator dead_user_end() const { return DeadUsers.end(); }
  /// @}

  /// \brief Allow iterating the dead expressions referring to this alloca.
  ///
  /// These are operands which cannot actually be used to refer to the
  /// alloca as they are outside its range and the user doesn't correct for
  /// that. These mostly consist of PHI node inputs and the like which we just
  /// need to replace with undef.
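  ///
  /// For example, given a 4 byte alloca %a, a PHI input such as
  ///   %oob = getelementptr [4 x i8]* %a, i64 0, i64 8
  ///   %p = phi i8* [ %oob, %bb1 ], [ %other, %bb2 ]
  /// has the %oob operand recorded here so it can be replaced with undef
  /// without disturbing %p itself.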
  /// @{
  typedef SmallVectorImpl<Use *>::const_iterator dead_op_iterator;
  dead_op_iterator dead_op_begin() const { return DeadOperands.begin(); }
  dead_op_iterator dead_op_end() const { return DeadOperands.end(); }
  /// @}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream &OS, const_iterator I, StringRef Indent = "  ") const;
  void printSlice(raw_ostream &OS, const_iterator I,
                  StringRef Indent = "  ") const;
  void printUse(raw_ostream &OS, const_iterator I,
                StringRef Indent = "  ") const;
  void print(raw_ostream &OS) const;
  void dump(const_iterator I) const;
  void dump() const;
#endif

private:
  template <typename DerivedT, typename RetT = void> class BuilderBase;
  class SliceBuilder;
  friend class AllocaSlices::SliceBuilder;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// \brief Handle to alloca instruction to simplify method interfaces.
  AllocaInst &AI;
#endif

  /// \brief The instruction responsible for this alloca not having a known set
  /// of slices.
  ///
  /// When an instruction (potentially) escapes the pointer to the alloca, we
  /// store a pointer to that here and abort trying to form slices of the
  /// alloca. This will be null if the alloca slices are analyzed successfully.
  Instruction *PointerEscapingInstr;

  /// \brief The slices of the alloca.
  ///
  /// We store a vector of the slices formed by uses of the alloca here. This
  /// vector is sorted by increasing begin offset, and then the unsplittable
  /// slices before the splittable ones. See the Slice inner class for more
  /// details.
  SmallVector<Slice, 8> Slices;

  /// \brief Instructions which will become dead if we rewrite the alloca.
  ///
  /// Note that these are not separated by slice. This is because we expect an
  /// alloca to be completely rewritten or not rewritten at all. If rewritten,
  /// all these instructions can simply be removed and replaced with undef as
  /// they come from outside of the allocated space.
  SmallVector<Instruction *, 8> DeadUsers;

  /// \brief Operands which will become dead if we rewrite the alloca.
  ///
  /// These are operands that in their particular use can be replaced with
  /// undef when we rewrite the alloca. These show up in out-of-bounds inputs
  /// to PHI nodes and the like. They aren't entirely dead (there might be
  /// a GEP back into the bounds using it elsewhere), nor is the PHI, but we
  /// want to swap this particular input for undef to simplify the use lists of
  /// the alloca.
  SmallVector<Use *, 8> DeadOperands;
};
}

static Value *foldSelectInst(SelectInst &SI) {
  // If the condition being selected on is a constant or the same value is
  // being selected between, fold the select. Yes this does (rarely) happen
  // early on.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(SI.getCondition()))
    return SI.getOperand(1 + CI->isZero());
  if (SI.getOperand(1) == SI.getOperand(2))
    return SI.getOperand(1);

  return nullptr;
}

/// \brief Builder for the alloca slices.
///
/// This class builds a set of alloca slices by recursively visiting the uses
/// of an alloca and making a slice for each load and store at each offset.
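/// For example, a store to the first field of an alloca of type { i32, i32 }
/// produces a [0,4) slice, and a load of the second field a [4,8) slice.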
class AllocaSlices::SliceBuilder : public PtrUseVisitor<SliceBuilder> {
  friend class PtrUseVisitor<SliceBuilder>;
  friend class InstVisitor<SliceBuilder>;
  typedef PtrUseVisitor<SliceBuilder> Base;

  const uint64_t AllocSize;
  AllocaSlices &S;

  SmallDenseMap<Instruction *, unsigned> MemTransferSliceMap;
  SmallDenseMap<Instruction *, uint64_t> PHIOrSelectSizes;

  /// \brief Set to de-duplicate dead instructions found in the use walk.
  SmallPtrSet<Instruction *, 4> VisitedDeadInsts;

public:
  SliceBuilder(const DataLayout &DL, AllocaInst &AI, AllocaSlices &S)
      : PtrUseVisitor<SliceBuilder>(DL),
        AllocSize(DL.getTypeAllocSize(AI.getAllocatedType())), S(S) {}

private:
  void markAsDead(Instruction &I) {
    if (VisitedDeadInsts.insert(&I))
      S.DeadUsers.push_back(&I);
  }

  void insertUse(Instruction &I, const APInt &Offset, uint64_t Size,
                 bool IsSplittable = false) {
    // Completely skip uses which have a zero size or start either before or
    // past the end of the allocation.
    if (Size == 0 || Offset.uge(AllocSize)) {
      DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @" << Offset
                   << " which has zero size or starts outside of the "
                   << AllocSize << " byte alloca:\n"
                   << "    alloca: " << S.AI << "\n"
                   << "       use: " << I << "\n");
      return markAsDead(I);
    }

    uint64_t BeginOffset = Offset.getZExtValue();
    uint64_t EndOffset = BeginOffset + Size;

    // Clamp the end offset to the end of the allocation. Note that this is
    // formulated to handle even the case where "BeginOffset + Size" overflows.
    // This may appear superficially to be something we could ignore entirely,
    // but that is not so! There may be widened loads or PHI-node uses where
    // some instructions are dead but not others. We can't completely ignore
    // them, and so have to record at least the information here.
    assert(AllocSize >= BeginOffset); // Established above.
    if (Size > AllocSize - BeginOffset) {
      DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" << Offset
                   << " to remain within the " << AllocSize << " byte alloca:\n"
                   << "    alloca: " << S.AI << "\n"
                   << "       use: " << I << "\n");
      EndOffset = AllocSize;
    }

    S.Slices.push_back(Slice(BeginOffset, EndOffset, U, IsSplittable));
  }

  void visitBitCastInst(BitCastInst &BC) {
    if (BC.use_empty())
      return markAsDead(BC);

    return Base::visitBitCastInst(BC);
  }

  void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
    if (GEPI.use_empty())
      return markAsDead(GEPI);

    if (SROAStrictInbounds && GEPI.isInBounds()) {
      // FIXME: This is a manually un-factored variant of the basic code inside
      // of GEPs with checking of the inbounds invariant specified in the
      // langref in a very strict sense. If we ever want to enable
      // SROAStrictInbounds, this code should be factored cleanly into
      // PtrUseVisitor, but it is easier to experiment with SROAStrictInbounds
      // by writing out the code here where we have the underlying allocation
      // size readily available.
      APInt GEPOffset = Offset;
      for (gep_type_iterator GTI = gep_type_begin(GEPI),
                             GTE = gep_type_end(GEPI);
           GTI != GTE; ++GTI) {
        ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
        if (!OpC)
          break;

        // Handle a struct index, which adds its field offset to the pointer.
        if (StructType *STy = dyn_cast<StructType>(*GTI)) {
          unsigned ElementIdx = OpC->getZExtValue();
          const StructLayout *SL = DL.getStructLayout(STy);
          GEPOffset +=
              APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx));
        } else {
          // For array or vector indices, scale the index by the size of the
          // type.
          APInt Index = OpC->getValue().sextOrTrunc(Offset.getBitWidth());
          GEPOffset +=
              Index * APInt(Offset.getBitWidth(),
                            DL.getTypeAllocSize(GTI.getIndexedType()));
        }

        // If this index has computed an intermediate pointer which is not
        // inbounds, then the result of the GEP is a poison value and we can
        // delete it and all uses.
        if (GEPOffset.ugt(AllocSize))
          return markAsDead(GEPI);
      }
    }

    return Base::visitGetElementPtrInst(GEPI);
  }

  void handleLoadOrStore(Type *Ty, Instruction &I, const APInt &Offset,
                         uint64_t Size, bool IsVolatile) {
    // We allow splitting of loads and stores where the type is an integer type
    // and covers the entire alloca. This prevents us from splitting
    // over-eagerly.
    // FIXME: In the great blue eventually, we should eagerly split all integer
    // loads and stores, and then have a separate step that merges adjacent
    // alloca partitions into a single partition suitable for integer widening.
    // Or we should skip the merge step and rely on GVN and other passes to
    // merge adjacent loads and stores that survive mem2reg.
    bool IsSplittable =
        Ty->isIntegerTy() && !IsVolatile && Offset == 0 && Size >= AllocSize;

    insertUse(I, Offset, Size, IsSplittable);
  }

  void visitLoadInst(LoadInst &LI) {
    assert((!LI.isSimple() || LI.getType()->isSingleValueType()) &&
           "All simple FCA loads should have been pre-split");

    if (!IsOffsetKnown)
      return PI.setAborted(&LI);

    uint64_t Size = DL.getTypeStoreSize(LI.getType());
    return handleLoadOrStore(LI.getType(), LI, Offset, Size, LI.isVolatile());
  }

  void visitStoreInst(StoreInst &SI) {
    Value *ValOp = SI.getValueOperand();
    if (ValOp == *U)
      return PI.setEscapedAndAborted(&SI);
    if (!IsOffsetKnown)
      return PI.setAborted(&SI);

    uint64_t Size = DL.getTypeStoreSize(ValOp->getType());

    // If this memory access can be shown to *statically* extend outside the
    // bounds of the allocation, its behavior is undefined, so simply
    // ignore it. Note that this is more strict than the generic clamping
    // behavior of insertUse. We also try to handle cases which might run the
    // risk of overflow.
    // FIXME: We should instead consider the pointer to have escaped if this
    // function is being instrumented for addressing bugs or race conditions.
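    // (For example, an 8 byte store at offset 0 of a 4 byte alloca is
    // discarded here outright, rather than being clamped to [0,4) the way
    // a load would be by insertUse.)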
    if (Size > AllocSize || Offset.ugt(AllocSize - Size)) {
      DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte store @" << Offset
                   << " which extends past the end of the " << AllocSize
                   << " byte alloca:\n"
                   << "    alloca: " << S.AI << "\n"
                   << "       use: " << SI << "\n");
      return markAsDead(SI);
    }

    assert((!SI.isSimple() || ValOp->getType()->isSingleValueType()) &&
           "All simple FCA stores should have been pre-split");
    handleLoadOrStore(ValOp->getType(), SI, Offset, Size, SI.isVolatile());
  }

  void visitMemSetInst(MemSetInst &II) {
    assert(II.getRawDest() == *U && "Pointer use is not the destination?");
    ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
    if ((Length && Length->getValue() == 0) ||
        (IsOffsetKnown && Offset.uge(AllocSize)))
      // Zero-length mem transfer intrinsics can be ignored entirely.
      return markAsDead(II);

    if (!IsOffsetKnown)
      return PI.setAborted(&II);

    insertUse(II, Offset,
              Length ? Length->getLimitedValue()
                     : AllocSize - Offset.getLimitedValue(),
              (bool)Length);
  }

  void visitMemTransferInst(MemTransferInst &II) {
    ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
    if (Length && Length->getValue() == 0)
      // Zero-length mem transfer intrinsics can be ignored entirely.
      return markAsDead(II);

    // Because we can visit these intrinsics twice, also check to see if the
    // first time marked this instruction as dead. If so, skip it.
    if (VisitedDeadInsts.count(&II))
      return;

    if (!IsOffsetKnown)
      return PI.setAborted(&II);

    // This side of the transfer is completely out-of-bounds, and so we can
    // nuke the entire transfer. However, we also need to nuke the other side
    // if already added to our partitions.
    // FIXME: Yet another place we really should bypass this when
    // instrumenting for ASan.
    if (Offset.uge(AllocSize)) {
      SmallDenseMap<Instruction *, unsigned>::iterator MTPI =
          MemTransferSliceMap.find(&II);
      if (MTPI != MemTransferSliceMap.end())
        S.Slices[MTPI->second].kill();
      return markAsDead(II);
    }

    uint64_t RawOffset = Offset.getLimitedValue();
    uint64_t Size = Length ? Length->getLimitedValue() : AllocSize - RawOffset;

    // Check for the special case where the same exact value is used for both
    // source and dest.
    if (*U == II.getRawDest() && *U == II.getRawSource()) {
      // For non-volatile transfers this is a no-op.
      if (!II.isVolatile())
        return markAsDead(II);

      return insertUse(II, Offset, Size, /*IsSplittable=*/false);
    }

    // If we have seen both source and destination for a mem transfer, then
    // they both point to the same alloca.
    bool Inserted;
    SmallDenseMap<Instruction *, unsigned>::iterator MTPI;
    std::tie(MTPI, Inserted) =
        MemTransferSliceMap.insert(std::make_pair(&II, S.Slices.size()));
    unsigned PrevIdx = MTPI->second;
    if (!Inserted) {
      Slice &PrevP = S.Slices[PrevIdx];

      // Check if the begin offsets match and this is a non-volatile transfer.
      // In that case, we can completely elide the transfer.
      if (!II.isVolatile() && PrevP.beginOffset() == RawOffset) {
        PrevP.kill();
        return markAsDead(II);
      }

      // Otherwise we have an offset transfer within the same alloca. We can't
      // split those.
      PrevP.makeUnsplittable();
    }
    // Insert the use now that we've fixed up the splittable nature.
    insertUse(II, Offset, Size, /*IsSplittable=*/Inserted && Length);

    // Check that we ended up with a valid index in the map.
    assert(S.Slices[PrevIdx].getUse()->getUser() == &II &&
           "Map index doesn't point back to a slice with this user.");
  }

  // Disable SRoA for any intrinsics except for lifetime invariants.
  // FIXME: What about debug intrinsics? This matches old behavior, but
  // doesn't make sense.
  void visitIntrinsicInst(IntrinsicInst &II) {
    if (!IsOffsetKnown)
      return PI.setAborted(&II);

    if (II.getIntrinsicID() == Intrinsic::lifetime_start ||
        II.getIntrinsicID() == Intrinsic::lifetime_end) {
      ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0));
      uint64_t Size = std::min(AllocSize - Offset.getLimitedValue(),
                               Length->getLimitedValue());
      insertUse(II, Offset, Size, true);
      return;
    }

    Base::visitIntrinsicInst(II);
  }

  Instruction *hasUnsafePHIOrSelectUse(Instruction *Root, uint64_t &Size) {
    // We consider any PHI or select that results in a direct load or store of
    // the same offset to be a viable use for slicing purposes. These uses
    // are considered unsplittable and the size is the maximum loaded or stored
    // size.
    SmallPtrSet<Instruction *, 4> Visited;
    SmallVector<std::pair<Instruction *, Instruction *>, 4> Uses;
    Visited.insert(Root);
    Uses.push_back(std::make_pair(cast<Instruction>(*U), Root));
    // If there are no loads or stores, the access is dead. We mark that as
    // a size zero access.
    Size = 0;
    do {
      Instruction *I, *UsedI;
      std::tie(UsedI, I) = Uses.pop_back_val();

      if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
        Size = std::max(Size, DL.getTypeStoreSize(LI->getType()));
        continue;
      }
      if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
        Value *Op = SI->getOperand(0);
        if (Op == UsedI)
          return SI;
        Size = std::max(Size, DL.getTypeStoreSize(Op->getType()));
        continue;
      }

      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
        if (!GEP->hasAllZeroIndices())
          return GEP;
      } else if (!isa<BitCastInst>(I) && !isa<PHINode>(I) &&
                 !isa<SelectInst>(I)) {
        return I;
      }

      for (User *U : I->users())
        if (Visited.insert(cast<Instruction>(U)))
          Uses.push_back(std::make_pair(I, cast<Instruction>(U)));
    } while (!Uses.empty());

    return nullptr;
  }

  void visitPHINode(PHINode &PN) {
    if (PN.use_empty())
      return markAsDead(PN);
    if (!IsOffsetKnown)
      return PI.setAborted(&PN);

    // See if we already have computed info on this node.
    uint64_t &PHISize = PHIOrSelectSizes[&PN];
    if (!PHISize) {
      // This is a new PHI node, check for an unsafe use of the PHI node.
      if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&PN, PHISize))
        return PI.setAborted(UnsafeI);
    }

    // For PHI and select operands outside the alloca, we can't nuke the entire
    // phi or select -- the other side might still be relevant, so we special
    // case them here and use a separate structure to track the operands
    // themselves which should be replaced with undef.
    // FIXME: This should instead be escaped in the event we're instrumenting
    // for address sanitization.
    if (Offset.uge(AllocSize)) {
      S.DeadOperands.push_back(U);
      return;
    }

    insertUse(PN, Offset, PHISize);
  }

  void visitSelectInst(SelectInst &SI) {
    if (SI.use_empty())
      return markAsDead(SI);
    if (Value *Result = foldSelectInst(SI)) {
      if (Result == *U)
        // If the result of the constant fold will be the pointer, recurse
        // through the select as if we had RAUW'ed it.
        enqueueUsers(SI);
      else
        // Otherwise the operand to the select is dead, and we can replace it
        // with undef.
        S.DeadOperands.push_back(U);

      return;
    }
    if (!IsOffsetKnown)
      return PI.setAborted(&SI);

    // See if we already have computed info on this node.
    uint64_t &SelectSize = PHIOrSelectSizes[&SI];
    if (!SelectSize) {
      // This is a new Select, check for an unsafe use of it.
      if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&SI, SelectSize))
        return PI.setAborted(UnsafeI);
    }

    // For PHI and select operands outside the alloca, we can't nuke the entire
    // phi or select -- the other side might still be relevant, so we special
    // case them here and use a separate structure to track the operands
    // themselves which should be replaced with undef.
    // FIXME: This should instead be escaped in the event we're instrumenting
    // for address sanitization.
    if (Offset.uge(AllocSize)) {
      S.DeadOperands.push_back(U);
      return;
    }

    insertUse(SI, Offset, SelectSize);
  }

  /// \brief Disable SROA entirely if there are unhandled users of the alloca.
  void visitInstruction(Instruction &I) { PI.setAborted(&I); }
};

AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI)
    :
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
      AI(AI),
#endif
      PointerEscapingInstr(nullptr) {
  SliceBuilder PB(DL, AI, *this);
  SliceBuilder::PtrInfo PtrI = PB.visitPtr(AI);
  if (PtrI.isEscaped() || PtrI.isAborted()) {
    // FIXME: We should sink the escape vs. abort info into the caller nicely,
    // possibly by just storing the PtrInfo in the AllocaSlices.
    PointerEscapingInstr = PtrI.getEscapingInst() ? PtrI.getEscapingInst()
                                                  : PtrI.getAbortingInst();
    assert(PointerEscapingInstr && "Did not track a bad instruction");
    return;
  }

  Slices.erase(std::remove_if(Slices.begin(), Slices.end(),
                              std::mem_fun_ref(&Slice::isDead)),
               Slices.end());

#if __cplusplus >= 201103L && !defined(NDEBUG)
  if (SROARandomShuffleSlices) {
    std::mt19937 MT(static_cast<unsigned>(sys::TimeValue::now().msec()));
    std::shuffle(Slices.begin(), Slices.end(), MT);
  }
#endif

  // Sort the uses. This arranges for the offsets to be in ascending order,
  // and the sizes to be in descending order.
  std::sort(Slices.begin(), Slices.end());
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

void AllocaSlices::print(raw_ostream &OS, const_iterator I,
                         StringRef Indent) const {
  printSlice(OS, I, Indent);
  printUse(OS, I, Indent);
}

void AllocaSlices::printSlice(raw_ostream &OS, const_iterator I,
                              StringRef Indent) const {
  OS << Indent << "[" << I->beginOffset() << "," << I->endOffset() << ")"
     << " slice #" << (I - begin())
     << (I->isSplittable() ? " (splittable)" : "") << "\n";
}

void AllocaSlices::printUse(raw_ostream &OS, const_iterator I,
                            StringRef Indent) const {
  OS << Indent << "  used by: " << *I->getUse()->getUser() << "\n";
}

void AllocaSlices::print(raw_ostream &OS) const {
  if (PointerEscapingInstr) {
    OS << "Can't analyze slices for alloca: " << AI << "\n"
       << "  A pointer to this alloca escaped by:\n"
       << "  " << *PointerEscapingInstr << "\n";
    return;
  }

  OS << "Slices of alloca: " << AI << "\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I)
    print(OS, I);
}

LLVM_DUMP_METHOD void AllocaSlices::dump(const_iterator I) const {
  print(dbgs(), I);
}
LLVM_DUMP_METHOD void AllocaSlices::dump() const { print(dbgs()); }

#endif // !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

namespace {
/// \brief Implementation of LoadAndStorePromoter for promoting allocas.
///
/// This subclass of LoadAndStorePromoter adds overrides to handle promoting
/// the loads and stores of an alloca instruction, as well as updating its
/// debug information. This is used when a domtree is unavailable and thus
/// mem2reg in its full form can't be used to handle promotion of allocas to
/// scalar values.
class AllocaPromoter : public LoadAndStorePromoter {
  AllocaInst &AI;
  DIBuilder &DIB;

  SmallVector<DbgDeclareInst *, 4> DDIs;
  SmallVector<DbgValueInst *, 4> DVIs;

public:
  AllocaPromoter(const SmallVectorImpl<Instruction *> &Insts, SSAUpdater &S,
                 AllocaInst &AI, DIBuilder &DIB)
      : LoadAndStorePromoter(Insts, S), AI(AI), DIB(DIB) {}

  void run(const SmallVectorImpl<Instruction *> &Insts) {
    // Retain the debug information attached to the alloca for use when
    // rewriting loads and stores.
    if (MDNode *DebugNode = MDNode::getIfExists(AI.getContext(), &AI)) {
      for (User *U : DebugNode->users())
        if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U))
          DDIs.push_back(DDI);
        else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(U))
          DVIs.push_back(DVI);
    }

    LoadAndStorePromoter::run(Insts);

    // While we have the debug information, clear it off of the alloca. The
    // caller takes care of deleting the alloca.
    while (!DDIs.empty())
      DDIs.pop_back_val()->eraseFromParent();
    while (!DVIs.empty())
      DVIs.pop_back_val()->eraseFromParent();
  }

  bool isInstInList(Instruction *I,
                    const SmallVectorImpl<Instruction *> &Insts)
      const override {
    Value *Ptr;
    if (LoadInst *LI = dyn_cast<LoadInst>(I))
      Ptr = LI->getOperand(0);
    else
      Ptr = cast<StoreInst>(I)->getPointerOperand();

    // Only used to detect cycles, which will be rare and quickly found as
    // we're walking up a chain of defs rather than down through uses.
    SmallPtrSet<Value *, 4> Visited;

    do {
      if (Ptr == &AI)
        return true;

      if (BitCastInst *BCI = dyn_cast<BitCastInst>(Ptr))
        Ptr = BCI->getOperand(0);
      else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr))
        Ptr = GEPI->getPointerOperand();
      else
        return false;

    } while (Visited.insert(Ptr));

    return false;
  }

  void updateDebugInfo(Instruction *Inst) const override {
    for (SmallVectorImpl<DbgDeclareInst *>::const_iterator I = DDIs.begin(),
                                                           E = DDIs.end();
         I != E; ++I) {
      DbgDeclareInst *DDI = *I;
      if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
        ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
      else if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
        ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
    }
    for (SmallVectorImpl<DbgValueInst *>::const_iterator I = DVIs.begin(),
                                                         E = DVIs.end();
         I != E; ++I) {
      DbgValueInst *DVI = *I;
      Value *Arg = nullptr;
      if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        // If an argument is zero extended then use the argument directly. The
        // ZExt may be zapped by an optimization pass in the future.
        if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0)))
          Arg = dyn_cast<Argument>(ZExt->getOperand(0));
        else if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0)))
          Arg = dyn_cast<Argument>(SExt->getOperand(0));
        if (!Arg)
          Arg = SI->getValueOperand();
      } else if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        Arg = LI->getPointerOperand();
      } else {
        continue;
      }
      Instruction *DbgVal =
          DIB.insertDbgValueIntrinsic(Arg, 0, DIVariable(DVI->getVariable()),
                                      Inst);
      DbgVal->setDebugLoc(DVI->getDebugLoc());
    }
  }
};
} // end anon namespace

namespace {
/// \brief An optimization pass providing Scalar Replacement of Aggregates.
///
/// This pass takes allocations which can be completely analyzed (that is, they
/// don't escape) and tries to turn them into scalar SSA values. There are
/// a few steps to this process.
///
/// 1) It takes allocations of aggregates and analyzes the ways in which they
///    are used to try to split them into smaller allocations, ideally of
///    a single scalar data type. It will split up memcpy and memset accesses
///    as necessary and try to isolate individual scalar accesses.
/// 2) It will transform accesses into forms which are suitable for SSA value
///    promotion. This can be replacing a memset with a scalar store of an
///    integer value, or it can involve speculating operations on a PHI or
///    select to be a PHI or select of the results.
/// 3) Finally, this will try to detect a pattern of accesses which map cleanly
///    onto insert and extract operations on a vector value, and convert them
///    to this form. By doing so, it will enable promotion of vector aggregates
///    to SSA vector values.
class SROA : public FunctionPass {
  const bool RequiresDomTree;

  LLVMContext *C;
  const DataLayout *DL;
  DominatorTree *DT;

  /// \brief Worklist of alloca instructions to simplify.
  ///
  /// Each alloca in the function is added to this. Each new alloca formed gets
  /// added to it as well to recursively simplify unless that alloca can be
  /// directly promoted. Finally, each time we rewrite a use of an alloca other
  /// than the one being actively rewritten, we add it back onto the list if
  /// not already present to ensure it is re-visited.
  SetVector<AllocaInst *, SmallVector<AllocaInst *, 16> > Worklist;

  /// \brief A collection of instructions to delete.
  /// We try to batch deletions to simplify code and make things a bit more
  /// efficient.
  SetVector<Instruction *, SmallVector<Instruction *, 8> > DeadInsts;

  /// \brief Post-promotion worklist.
  ///
  /// Sometimes we discover an alloca which has a high probability of becoming
  /// viable for SROA after a round of promotion takes place. In those cases,
  /// the alloca is enqueued here for re-processing.
  ///
  /// Note that we have to be very careful to clear allocas out of this list in
  /// the event they are deleted.
  SetVector<AllocaInst *, SmallVector<AllocaInst *, 16> > PostPromotionWorklist;

  /// \brief A collection of alloca instructions we can directly promote.
  std::vector<AllocaInst *> PromotableAllocas;

  /// \brief A worklist of PHIs to speculate prior to promoting allocas.
  ///
  /// All of these PHIs have been checked for the safety of speculation and by
  /// being speculated will allow promoting allocas currently in the promotable
  /// queue.
  SetVector<PHINode *, SmallVector<PHINode *, 2> > SpeculatablePHIs;

  /// \brief A worklist of select instructions to speculate prior to promoting
  /// allocas.
  ///
  /// All of these select instructions have been checked for the safety of
  /// speculation and by being speculated will allow promoting allocas
  /// currently in the promotable queue.
  SetVector<SelectInst *, SmallVector<SelectInst *, 2> > SpeculatableSelects;

public:
  SROA(bool RequiresDomTree = true)
      : FunctionPass(ID), RequiresDomTree(RequiresDomTree), C(nullptr),
        DL(nullptr), DT(nullptr) {
    initializeSROAPass(*PassRegistry::getPassRegistry());
  }
  bool runOnFunction(Function &F) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  const char *getPassName() const override { return "SROA"; }
  static char ID;

private:
  friend class PHIOrSelectSpeculator;
  friend class AllocaSliceRewriter;

  bool rewritePartition(AllocaInst &AI, AllocaSlices &S,
                        AllocaSlices::iterator B, AllocaSlices::iterator E,
                        int64_t BeginOffset, int64_t EndOffset,
                        ArrayRef<AllocaSlices::iterator> SplitUses);
  bool splitAlloca(AllocaInst &AI, AllocaSlices &S);
  bool runOnAlloca(AllocaInst &AI);
  void clobberUse(Use &U);
  void deleteDeadInstructions(SmallPtrSet<AllocaInst *, 4> &DeletedAllocas);
  bool promoteAllocas(Function &F);
};
}

char SROA::ID = 0;

FunctionPass *llvm::createSROAPass(bool RequiresDomTree) {
  return new SROA(RequiresDomTree);
}

INITIALIZE_PASS_BEGIN(SROA, "sroa", "Scalar Replacement Of Aggregates",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(SROA, "sroa", "Scalar Replacement Of Aggregates",
                    false, false)

/// Walk the range of a partitioning looking for a common type to cover this
/// sequence of slices.
static Type *findCommonType(AllocaSlices::const_iterator B,
                            AllocaSlices::const_iterator E,
                            uint64_t EndOffset) {
  Type *Ty = nullptr;
  bool TyIsCommon = true;
  IntegerType *ITy = nullptr;

  // Note that we need to look at *every* alloca slice's Use to ensure we
  // always get consistent results regardless of the order of slices.
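  // For example, if one slice loads the partition as an i64 and another as
  // a double, there is no common type, but the i64 is still tracked below
  // and returned as the integer fallback.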
  for (AllocaSlices::const_iterator I = B; I != E; ++I) {
    Use *U = I->getUse();
    if (isa<IntrinsicInst>(*U->getUser()))
      continue;
    if (I->beginOffset() != B->beginOffset() || I->endOffset() != EndOffset)
      continue;

    Type *UserTy = nullptr;
    if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
      UserTy = LI->getType();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
      UserTy = SI->getValueOperand()->getType();
    }

    if (!UserTy || (Ty && Ty != UserTy))
      TyIsCommon = false; // Give up on anything but an iN type.
    else
      Ty = UserTy;

    if (IntegerType *UserITy = dyn_cast_or_null<IntegerType>(UserTy)) {
      // If the type is larger than the partition, skip it. We only encounter
      // this for split integer operations where we want to use the type of the
      // entity causing the split. Also skip if the type is not a byte width
      // multiple.
      if (UserITy->getBitWidth() % 8 != 0 ||
          UserITy->getBitWidth() / 8 > (EndOffset - B->beginOffset()))
        continue;

      // Track the largest bitwidth integer type used in this way in case there
      // is no common type.
      if (!ITy || ITy->getBitWidth() < UserITy->getBitWidth())
        ITy = UserITy;
    }
  }

  return TyIsCommon ? Ty : ITy;
}

/// PHI instructions that use an alloca and are subsequently loaded can be
/// rewritten to load both input pointers in the pred blocks and then PHI the
/// results, allowing the load of the alloca to be promoted.
/// From this:
///   %P2 = phi [i32* %Alloca, i32* %Other]
///   %V = load i32* %P2
/// to:
///   %V1 = load i32* %Alloca      -> will be mem2reg'd
///   ...
///   %V2 = load i32* %Other
///   ...
///   %V = phi [i32 %V1, i32 %V2]
///
/// We can do this to a select if its only uses are loads and if the operands
/// to the select can be loaded unconditionally.
///
/// FIXME: This should be hoisted into a generic utility, likely in
/// Transforms/Util/Local.h
static bool isSafePHIToSpeculate(PHINode &PN, const DataLayout *DL = nullptr) {
  // For now, we can only do this promotion if the load is in the same block
  // as the PHI, and if there are no stores between the phi and load.
  // TODO: Allow recursive phi users.
  // TODO: Allow stores.
  BasicBlock *BB = PN.getParent();
  unsigned MaxAlign = 0;
  bool HaveLoad = false;
  for (User *U : PN.users()) {
    LoadInst *LI = dyn_cast<LoadInst>(U);
    if (!LI || !LI->isSimple())
      return false;

    // For now we only allow loads in the same block as the PHI. This is
    // a common case that happens when instcombine merges two loads through
    // a PHI.
    if (LI->getParent() != BB)
      return false;

    // Ensure that there are no instructions between the PHI and the load that
    // could store.
    for (BasicBlock::iterator BBI = &PN; &*BBI != LI; ++BBI)
      if (BBI->mayWriteToMemory())
        return false;

    MaxAlign = std::max(MaxAlign, LI->getAlignment());
    HaveLoad = true;
  }

  if (!HaveLoad)
    return false;

  // We can only transform this if it is safe to push the loads into the
  // predecessor blocks. The only thing to watch out for is that we can't put
  // a possibly trapping load in the predecessor if it is a critical edge.
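  // For example, an incoming value defined by an invoke in the predecessor
  // leaves no valid insertion point before the terminator, and a potentially
  // trapping load hoisted onto a critical edge would execute on paths that
  // never reached the original load.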
  for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
    TerminatorInst *TI = PN.getIncomingBlock(Idx)->getTerminator();
    Value *InVal = PN.getIncomingValue(Idx);

    // If the value is produced by the terminator of the predecessor (an
    // invoke) or it has side-effects, there is no valid place to put a load
    // in the predecessor.
    if (TI == InVal || TI->mayHaveSideEffects())
      return false;

    // If the predecessor has a single successor, then the edge isn't
    // critical.
    if (TI->getNumSuccessors() == 1)
      continue;

    // If this pointer is always safe to load, or if we can prove that there
    // is already a load in the block, then we can move the load to the pred
    // block.
    if (InVal->isDereferenceablePointer() ||
        isSafeToLoadUnconditionally(InVal, TI, MaxAlign, DL))
      continue;

    return false;
  }

  return true;
}

static void speculatePHINodeLoads(PHINode &PN) {
  DEBUG(dbgs() << "    original: " << PN << "\n");

  Type *LoadTy = cast<PointerType>(PN.getType())->getElementType();
  IRBuilderTy PHIBuilder(&PN);
  PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues(),
                                        PN.getName() + ".sroa.speculated");

  // Get the TBAA tag and alignment to use from one of the loads. It doesn't
  // matter which one we get and if any differ.
  LoadInst *SomeLoad = cast<LoadInst>(PN.user_back());
  MDNode *TBAATag = SomeLoad->getMetadata(LLVMContext::MD_tbaa);
  unsigned Align = SomeLoad->getAlignment();

  // Rewrite all loads of the PN to use the new PHI.
  while (!PN.use_empty()) {
    LoadInst *LI = cast<LoadInst>(PN.user_back());
    LI->replaceAllUsesWith(NewPN);
    LI->eraseFromParent();
  }

  // Inject loads into all of the pred blocks.
  for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
    BasicBlock *Pred = PN.getIncomingBlock(Idx);
    TerminatorInst *TI = Pred->getTerminator();
    Value *InVal = PN.getIncomingValue(Idx);
    IRBuilderTy PredBuilder(TI);

    LoadInst *Load = PredBuilder.CreateLoad(
        InVal, (PN.getName() + ".sroa.speculate.load." + Pred->getName()));
    ++NumLoadsSpeculated;
    Load->setAlignment(Align);
    if (TBAATag)
      Load->setMetadata(LLVMContext::MD_tbaa, TBAATag);
    NewPN->addIncoming(Load, Pred);
  }

  DEBUG(dbgs() << "          speculated to: " << *NewPN << "\n");
  PN.eraseFromParent();
}

/// Select instructions that use an alloca and are subsequently loaded can be
/// rewritten to load both input pointers and then select between the results,
/// allowing the load of the alloca to be promoted.
/// From this:
///   %P2 = select i1 %cond, i32* %Alloca, i32* %Other
///   %V = load i32* %P2
/// to:
///   %V1 = load i32* %Alloca      -> will be mem2reg'd
///   %V2 = load i32* %Other
///   %V = select i1 %cond, i32 %V1, i32 %V2
///
/// We can do this to a select if its only uses are loads and if the operand
/// to the select can be loaded unconditionally.
static bool isSafeSelectToSpeculate(SelectInst &SI,
                                    const DataLayout *DL = nullptr) {
  Value *TValue = SI.getTrueValue();
  Value *FValue = SI.getFalseValue();
  bool TDerefable = TValue->isDereferenceablePointer();
  bool FDerefable = FValue->isDereferenceablePointer();

  for (User *U : SI.users()) {
    LoadInst *LI = dyn_cast<LoadInst>(U);
    if (!LI || !LI->isSimple())
      return false;

    // Both operands to the select need to be dereferenceable, either
    // absolutely (e.g. allocas) or at this point because we can see other
    // accesses to it.
    if (!TDerefable &&
        !isSafeToLoadUnconditionally(TValue, LI, LI->getAlignment(), DL))
      return false;
    if (!FDerefable &&
        !isSafeToLoadUnconditionally(FValue, LI, LI->getAlignment(), DL))
      return false;
  }

  return true;
}

static void speculateSelectInstLoads(SelectInst &SI) {
  DEBUG(dbgs() << "    original: " << SI << "\n");

  IRBuilderTy IRB(&SI);
  Value *TV = SI.getTrueValue();
  Value *FV = SI.getFalseValue();
  // Replace the loads of the select with a select of two loads.
  while (!SI.use_empty()) {
    LoadInst *LI = cast<LoadInst>(SI.user_back());
    assert(LI->isSimple() && "We only speculate simple loads");

    IRB.SetInsertPoint(LI);
    LoadInst *TL =
        IRB.CreateLoad(TV, LI->getName() + ".sroa.speculate.load.true");
    LoadInst *FL =
        IRB.CreateLoad(FV, LI->getName() + ".sroa.speculate.load.false");
    NumLoadsSpeculated += 2;

    // Transfer alignment and TBAA info if present.
    TL->setAlignment(LI->getAlignment());
    FL->setAlignment(LI->getAlignment());
    if (MDNode *Tag = LI->getMetadata(LLVMContext::MD_tbaa)) {
      TL->setMetadata(LLVMContext::MD_tbaa, Tag);
      FL->setMetadata(LLVMContext::MD_tbaa, Tag);
    }

    Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL,
                                LI->getName() + ".sroa.speculated");

    DEBUG(dbgs() << "          speculated to: " << *V << "\n");
    LI->replaceAllUsesWith(V);
    LI->eraseFromParent();
  }
  SI.eraseFromParent();
}

/// \brief Build a GEP out of a base pointer and indices.
///
/// This will return the BasePtr if that is valid, or build a new GEP
/// instruction using the IRBuilder if GEP-ing is needed.
static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr,
                       SmallVectorImpl<Value *> &Indices, Twine NamePrefix) {
  if (Indices.empty())
    return BasePtr;

  // A single zero index is a no-op, so check for this and avoid building a GEP
  // in that case.
  if (Indices.size() == 1 && cast<ConstantInt>(Indices.back())->isZero())
    return BasePtr;

  return IRB.CreateInBoundsGEP(BasePtr, Indices, NamePrefix + "sroa_idx");
}

/// \brief Get a natural GEP off of the BasePtr walking through Ty toward
/// TargetTy without changing the offset of the pointer.
///
/// This routine assumes we've already established a properly offset GEP with
/// Indices, and arrived at the Ty type. The goal is to continue to GEP with
/// zero-indices down through type layers until we find one the same as
/// TargetTy. If we can't find one with the same type, we at least try to use
/// one with the same size. If none of that works, we just produce the GEP as
/// indicated by Indices to have the correct offset.
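///
/// For example, descending from a { [2 x i32] } type toward an i32 TargetTy
/// appends a zero index for the struct field and another for the array
/// element, leaving a GEP that still points at the original byte offset.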
static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL,
                                    Value *BasePtr, Type *Ty, Type *TargetTy,
                                    SmallVectorImpl<Value *> &Indices,
                                    Twine NamePrefix) {
  if (Ty == TargetTy)
    return buildGEP(IRB, BasePtr, Indices, NamePrefix);

  // Pointer size to use for the indices.
  unsigned PtrSize = DL.getPointerTypeSizeInBits(BasePtr->getType());

  // See if we can descend into a struct and locate a field with the correct
  // type.
  unsigned NumLayers = 0;
  Type *ElementTy = Ty;
  do {
    if (ElementTy->isPointerTy())
      break;

    if (ArrayType *ArrayTy = dyn_cast<ArrayType>(ElementTy)) {
      ElementTy = ArrayTy->getElementType();
      Indices.push_back(IRB.getIntN(PtrSize, 0));
    } else if (VectorType *VectorTy = dyn_cast<VectorType>(ElementTy)) {
      ElementTy = VectorTy->getElementType();
      Indices.push_back(IRB.getInt32(0));
    } else if (StructType *STy = dyn_cast<StructType>(ElementTy)) {
      if (STy->element_begin() == STy->element_end())
        break; // Nothing left to descend into.
      ElementTy = *STy->element_begin();
      Indices.push_back(IRB.getInt32(0));
    } else {
      break;
    }
    ++NumLayers;
  } while (ElementTy != TargetTy);
  if (ElementTy != TargetTy)
    Indices.erase(Indices.end() - NumLayers, Indices.end());

  return buildGEP(IRB, BasePtr, Indices, NamePrefix);
}

/// \brief Recursively compute indices for a natural GEP.
///
/// This is the recursive step for getNaturalGEPWithOffset that walks down the
/// element types adding appropriate indices for the GEP.
static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL,
                                       Value *Ptr, Type *Ty, APInt &Offset,
                                       Type *TargetTy,
                                       SmallVectorImpl<Value *> &Indices,
                                       Twine NamePrefix) {
  if (Offset == 0)
    return getNaturalGEPWithType(IRB, DL, Ptr, Ty, TargetTy, Indices,
                                 NamePrefix);

  // We can't recurse through pointer types.
  if (Ty->isPointerTy())
    return nullptr;

  // We try to analyze GEPs over vectors here, but note that these GEPs are
  // extremely poorly defined currently. The long-term goal is to remove GEPing
  // over a vector from the IR completely.
  if (VectorType *VecTy = dyn_cast<VectorType>(Ty)) {
    unsigned ElementSizeInBits = DL.getTypeSizeInBits(VecTy->getScalarType());
    if (ElementSizeInBits % 8 != 0) {
      // GEPs over non-multiple of 8 size vector elements are invalid.
      return nullptr;
    }
    APInt ElementSize(Offset.getBitWidth(), ElementSizeInBits / 8);
    APInt NumSkippedElements = Offset.sdiv(ElementSize);
    if (NumSkippedElements.ugt(VecTy->getNumElements()))
      return nullptr;
    Offset -= NumSkippedElements * ElementSize;
    Indices.push_back(IRB.getInt(NumSkippedElements));
    return getNaturalGEPRecursively(IRB, DL, Ptr, VecTy->getElementType(),
                                    Offset, TargetTy, Indices, NamePrefix);
  }

  if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
    Type *ElementTy = ArrTy->getElementType();
    APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
    APInt NumSkippedElements = Offset.sdiv(ElementSize);
    if (NumSkippedElements.ugt(ArrTy->getNumElements()))
      return nullptr;

    Offset -= NumSkippedElements * ElementSize;
    Indices.push_back(IRB.getInt(NumSkippedElements));
    return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
                                    Indices, NamePrefix);
  }

  StructType *STy = dyn_cast<StructType>(Ty);
  if (!STy)
    return nullptr;

  const StructLayout *SL = DL.getStructLayout(STy);
  uint64_t StructOffset = Offset.getZExtValue();
  if (StructOffset >= SL->getSizeInBytes())
    return nullptr;
  unsigned Index = SL->getElementContainingOffset(StructOffset);
  Offset -= APInt(Offset.getBitWidth(), SL->getElementOffset(Index));
  Type *ElementTy = STy->getElementType(Index);
  if (Offset.uge(DL.getTypeAllocSize(ElementTy)))
    return nullptr; // The offset points into alignment padding.

  Indices.push_back(IRB.getInt32(Index));
  return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
                                  Indices, NamePrefix);
}

/// \brief Get a natural GEP from a base pointer to a particular offset and
/// resulting in a particular type.
///
/// The goal is to produce a "natural" looking GEP that works with the existing
/// composite types to arrive at the appropriate offset and element type for
/// a pointer. TargetTy is the element type the returned GEP should point-to if
/// possible. We recurse by decreasing Offset, adding the appropriate index to
/// Indices, and setting Ty to the result subtype.
///
/// If no natural GEP can be constructed, this function returns null.
static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL,
                                      Value *Ptr, APInt Offset, Type *TargetTy,
                                      SmallVectorImpl<Value *> &Indices,
                                      Twine NamePrefix) {
  PointerType *Ty = cast<PointerType>(Ptr->getType());

  // Don't consider any GEPs through an i8* as natural unless the TargetTy is
  // an i8.
  if (Ty == IRB.getInt8PtrTy(Ty->getAddressSpace()) &&
      TargetTy->isIntegerTy(8))
    return nullptr;

  Type *ElementTy = Ty->getElementType();
  if (!ElementTy->isSized())
    return nullptr; // We can't GEP through an unsized element.
  APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
  if (ElementSize == 0)
    return nullptr; // Zero-length arrays can't help us build a natural GEP.
  APInt NumSkippedElements = Offset.sdiv(ElementSize);

  Offset -= NumSkippedElements * ElementSize;
  Indices.push_back(IRB.getInt(NumSkippedElements));
  return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
                                  Indices, NamePrefix);
}

/// \brief Compute an adjusted pointer from Ptr by Offset bytes where the
/// resulting pointer has PointerTy.
///
/// This tries very hard to compute a "natural" GEP which arrives at the offset
/// and produces the pointer type desired. Where it cannot, it will try to use
/// the natural GEP to arrive at the offset and bitcast to the type. Where that
/// fails, it will try to use an existing i8* and GEP to the byte offset and
/// bitcast to the type.
///
/// The strategy for finding the more natural GEPs is to peel off layers of the
/// pointer, walking back through bit casts and GEPs, searching for a base
/// pointer from which we can compute a natural GEP with the desired
/// properties. The algorithm tries to fold as many constant indices into
/// a single GEP as possible, thus making each GEP more independent of the
/// surrounding code.
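///
/// For example (illustrative IR), given
///   %cast = bitcast [8 x i32]* %a to i8*
///   %raw = getelementptr i8* %cast, i64 8
/// adjusting %raw by a further 4 bytes can peel back to %a and fold the
/// combined offset of 12 into the single natural GEP
///   getelementptr inbounds [8 x i32]* %a, i64 0, i64 3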
static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL,
                             Value *Ptr, APInt Offset, Type *PointerTy,
                             Twine NamePrefix) {
  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<Value *, 4> Visited;
  Visited.insert(Ptr);
  SmallVector<Value *, 4> Indices;

  // We may end up computing an offset pointer that has the wrong type. If we
  // never are able to compute one directly that has the correct type, we'll
  // fall back to it, so keep it around here.
  Value *OffsetPtr = nullptr;

  // Remember any i8 pointer we come across to re-use if we need to do a raw
  // byte offset.
  Value *Int8Ptr = nullptr;
  APInt Int8PtrOffset(Offset.getBitWidth(), 0);

  Type *TargetTy = PointerTy->getPointerElementType();

  do {
    // First fold any existing GEPs into the offset.
    while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
      APInt GEPOffset(Offset.getBitWidth(), 0);
      if (!GEP->accumulateConstantOffset(DL, GEPOffset))
        break;
      Offset += GEPOffset;
      Ptr = GEP->getPointerOperand();
      if (!Visited.insert(Ptr))
        break;
    }

    // See if we can perform a natural GEP here.
    Indices.clear();
    if (Value *P = getNaturalGEPWithOffset(IRB, DL, Ptr, Offset, TargetTy,
                                           Indices, NamePrefix)) {
      if (P->getType() == PointerTy) {
        // Zap any offset pointer that we ended up computing in previous
        // rounds.
        if (OffsetPtr && OffsetPtr->use_empty())
          if (Instruction *I = dyn_cast<Instruction>(OffsetPtr))
            I->eraseFromParent();
        return P;
      }
      if (!OffsetPtr) {
        OffsetPtr = P;
      }
    }

    // Stash this pointer if we've found an i8*.
    if (Ptr->getType()->isIntegerTy(8)) {
      Int8Ptr = Ptr;
      Int8PtrOffset = Offset;
    }

    // Peel off a layer of the pointer and update the offset appropriately.
    if (Operator::getOpcode(Ptr) == Instruction::BitCast) {
      Ptr = cast<Operator>(Ptr)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
      if (GA->mayBeOverridden())
        break;
      Ptr = GA->getAliasee();
    } else {
      break;
    }
    assert(Ptr->getType()->isPointerTy() && "Unexpected operand type!");
  } while (Visited.insert(Ptr));

  if (!OffsetPtr) {
    if (!Int8Ptr) {
      Int8Ptr = IRB.CreateBitCast(
          Ptr, IRB.getInt8PtrTy(PointerTy->getPointerAddressSpace()),
          NamePrefix + "sroa_raw_cast");
      Int8PtrOffset = Offset;
    }

    OffsetPtr = Int8PtrOffset == 0
                    ? Int8Ptr
                    : IRB.CreateInBoundsGEP(Int8Ptr, IRB.getInt(Int8PtrOffset),
                                            NamePrefix + "sroa_raw_idx");
  }
  Ptr = OffsetPtr;
Int8Ptr : 1517 IRB.CreateInBoundsGEP(Int8Ptr, IRB.getInt(Int8PtrOffset), 1518 NamePrefix + "sroa_raw_idx"); 1519 } 1520 Ptr = OffsetPtr; 1521 1522 // On the off chance we were targeting i8*, guard the bitcast here. 1523 if (Ptr->getType() != PointerTy) 1524 Ptr = IRB.CreateBitCast(Ptr, PointerTy, NamePrefix + "sroa_cast"); 1525 1526 return Ptr; 1527} 1528 1529/// \brief Test whether we can convert a value from the old to the new type. 1530/// 1531/// This predicate should be used to guard calls to convertValue in order to 1532/// ensure that we only try to convert viable values. The strategy is that we 1533/// will peel off single element struct and array wrappings to get to an 1534/// underlying value, and convert that value. 1535static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) { 1536 if (OldTy == NewTy) 1537 return true; 1538 if (IntegerType *OldITy = dyn_cast<IntegerType>(OldTy)) 1539 if (IntegerType *NewITy = dyn_cast<IntegerType>(NewTy)) 1540 if (NewITy->getBitWidth() >= OldITy->getBitWidth()) 1541 return true; 1542 if (DL.getTypeSizeInBits(NewTy) != DL.getTypeSizeInBits(OldTy)) 1543 return false; 1544 if (!NewTy->isSingleValueType() || !OldTy->isSingleValueType()) 1545 return false; 1546 1547 // We can convert pointers to integers and vice-versa. Same for vectors 1548 // of pointers and integers. 1549 OldTy = OldTy->getScalarType(); 1550 NewTy = NewTy->getScalarType(); 1551 if (NewTy->isPointerTy() || OldTy->isPointerTy()) { 1552 if (NewTy->isPointerTy() && OldTy->isPointerTy()) 1553 return true; 1554 if (NewTy->isIntegerTy() || OldTy->isIntegerTy()) 1555 return true; 1556 return false; 1557 } 1558 1559 return true; 1560} 1561 1562/// \brief Generic routine to convert an SSA value to a value of a different 1563/// type. 1564/// 1565/// This will try various different casting techniques, such as bitcasts, 1566/// inttoptr, and ptrtoint casts. Use the \c canConvertValue predicate to test 1567/// two types for viability with this routine. 1568static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V, 1569 Type *NewTy) { 1570 Type *OldTy = V->getType(); 1571 assert(canConvertValue(DL, OldTy, NewTy) && "Value not convertible to type"); 1572 1573 if (OldTy == NewTy) 1574 return V; 1575 1576 if (IntegerType *OldITy = dyn_cast<IntegerType>(OldTy)) 1577 if (IntegerType *NewITy = dyn_cast<IntegerType>(NewTy)) 1578 if (NewITy->getBitWidth() > OldITy->getBitWidth()) 1579 return IRB.CreateZExt(V, NewITy); 1580 1581 // See if we need inttoptr for this type pair. A cast involving both scalars 1582 // and vectors requires an additional bitcast. 1583 if (OldTy->getScalarType()->isIntegerTy() && 1584 NewTy->getScalarType()->isPointerTy()) { 1585 // Expand <2 x i32> to i8* --> <2 x i32> to i64 to i8* 1586 if (OldTy->isVectorTy() && !NewTy->isVectorTy()) 1587 return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)), 1588 NewTy); 1589 1590 // Expand i128 to <2 x i8*> --> i128 to <2 x i64> to <2 x i8*> 1591 if (!OldTy->isVectorTy() && NewTy->isVectorTy()) 1592 return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)), 1593 NewTy); 1594 1595 return IRB.CreateIntToPtr(V, NewTy); 1596 } 1597 1598 // See if we need ptrtoint for this type pair. A cast involving both scalars 1599 // and vectors requires an additional bitcast.
1600 if (OldTy->getScalarType()->isPointerTy() && 1601 NewTy->getScalarType()->isIntegerTy()) { 1602 // Expand <2 x i8*> to i128 --> <2 x i8*> to <2 x i64> to i128 1603 if (OldTy->isVectorTy() && !NewTy->isVectorTy()) 1604 return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)), 1605 NewTy); 1606 1607 // Expand i8* to <2 x i32> --> i8* to i64 to <2 x i32> 1608 if (!OldTy->isVectorTy() && NewTy->isVectorTy()) 1609 return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)), 1610 NewTy); 1611 1612 return IRB.CreatePtrToInt(V, NewTy); 1613 } 1614 1615 return IRB.CreateBitCast(V, NewTy); 1616} 1617 1618/// \brief Test whether the given slice use can be promoted to a vector. 1619/// 1620/// This function is called to test each entry in a partition which is slated 1621/// for a single slice. 1622static bool isVectorPromotionViableForSlice( 1623 const DataLayout &DL, AllocaSlices &S, uint64_t SliceBeginOffset, 1624 uint64_t SliceEndOffset, VectorType *Ty, uint64_t ElementSize, 1625 AllocaSlices::const_iterator I) { 1626 // First validate the slice offsets. 1627 uint64_t BeginOffset = 1628 std::max(I->beginOffset(), SliceBeginOffset) - SliceBeginOffset; 1629 uint64_t BeginIndex = BeginOffset / ElementSize; 1630 if (BeginIndex * ElementSize != BeginOffset || 1631 BeginIndex >= Ty->getNumElements()) 1632 return false; 1633 uint64_t EndOffset = 1634 std::min(I->endOffset(), SliceEndOffset) - SliceBeginOffset; 1635 uint64_t EndIndex = EndOffset / ElementSize; 1636 if (EndIndex * ElementSize != EndOffset || EndIndex > Ty->getNumElements()) 1637 return false; 1638 1639 assert(EndIndex > BeginIndex && "Empty vector!"); 1640 uint64_t NumElements = EndIndex - BeginIndex; 1641 Type *SliceTy = 1642 (NumElements == 1) ? Ty->getElementType() 1643 : VectorType::get(Ty->getElementType(), NumElements); 1644 1645 Type *SplitIntTy = 1646 Type::getIntNTy(Ty->getContext(), NumElements * ElementSize * 8); 1647 1648 Use *U = I->getUse(); 1649 1650 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) { 1651 if (MI->isVolatile()) 1652 return false; 1653 if (!I->isSplittable()) 1654 return false; // Skip any unsplittable intrinsics. 1655 } else if (U->get()->getType()->getPointerElementType()->isStructTy()) { 1656 // Disable vector promotion when there are loads or stores of an FCA. 1657 return false; 1658 } else if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) { 1659 if (LI->isVolatile()) 1660 return false; 1661 Type *LTy = LI->getType(); 1662 if (SliceBeginOffset > I->beginOffset() || 1663 SliceEndOffset < I->endOffset()) { 1664 assert(LTy->isIntegerTy()); 1665 LTy = SplitIntTy; 1666 } 1667 if (!canConvertValue(DL, SliceTy, LTy)) 1668 return false; 1669 } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) { 1670 if (SI->isVolatile()) 1671 return false; 1672 Type *STy = SI->getValueOperand()->getType(); 1673 if (SliceBeginOffset > I->beginOffset() || 1674 SliceEndOffset < I->endOffset()) { 1675 assert(STy->isIntegerTy()); 1676 STy = SplitIntTy; 1677 } 1678 if (!canConvertValue(DL, STy, SliceTy)) 1679 return false; 1680 } else { 1681 return false; 1682 } 1683 1684 return true; 1685} 1686 1687/// \brief Test whether the given alloca partitioning and range of slices can be 1688/// promoted to a vector. 1689/// 1690/// This is a quick test to check whether we can rewrite a particular alloca 1691/// partition (and its newly formed alloca) into a vector alloca with only 1692/// whole-vector loads and stores such that it could be promoted to a vector 1693/// SSA value.
We can only ensure this for a limited set of operations, and we 1694 /// don't want to do the rewrites unless we are confident that the result will 1695 /// be promotable, so we have an early test here. 1696static bool 1697isVectorPromotionViable(const DataLayout &DL, Type *AllocaTy, AllocaSlices &S, 1698 uint64_t SliceBeginOffset, uint64_t SliceEndOffset, 1699 AllocaSlices::const_iterator I, 1700 AllocaSlices::const_iterator E, 1701 ArrayRef<AllocaSlices::iterator> SplitUses) { 1702 VectorType *Ty = dyn_cast<VectorType>(AllocaTy); 1703 if (!Ty) 1704 return false; 1705 1706 uint64_t ElementSize = DL.getTypeSizeInBits(Ty->getScalarType()); 1707 1708 // While the definition of LLVM vectors is bitpacked, we don't support sizes 1709 // that aren't byte sized. 1710 if (ElementSize % 8) 1711 return false; 1712 assert((DL.getTypeSizeInBits(Ty) % 8) == 0 && 1713 "vector size not a multiple of element size?"); 1714 ElementSize /= 8; 1715 1716 for (; I != E; ++I) 1717 if (!isVectorPromotionViableForSlice(DL, S, SliceBeginOffset, 1718 SliceEndOffset, Ty, ElementSize, I)) 1719 return false; 1720 1721 for (ArrayRef<AllocaSlices::iterator>::const_iterator SUI = SplitUses.begin(), 1722 SUE = SplitUses.end(); 1723 SUI != SUE; ++SUI) 1724 if (!isVectorPromotionViableForSlice(DL, S, SliceBeginOffset, 1725 SliceEndOffset, Ty, ElementSize, *SUI)) 1726 return false; 1727 1728 return true; 1729} 1730 1731/// \brief Test whether a slice of an alloca is valid for integer widening. 1732/// 1733/// This implements the necessary checking for the \c isIntegerWideningViable 1734/// test below on a single slice of the alloca. 1735static bool isIntegerWideningViableForSlice(const DataLayout &DL, 1736 Type *AllocaTy, 1737 uint64_t AllocBeginOffset, 1738 uint64_t Size, AllocaSlices &S, 1739 AllocaSlices::const_iterator I, 1740 bool &WholeAllocaOp) { 1741 uint64_t RelBegin = I->beginOffset() - AllocBeginOffset; 1742 uint64_t RelEnd = I->endOffset() - AllocBeginOffset; 1743 1744 // We can't reasonably handle cases where the load or store extends past 1745 // the end of the alloca's type and into its padding. 1746 if (RelEnd > Size) 1747 return false; 1748 1749 Use *U = I->getUse(); 1750 1751 if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) { 1752 if (LI->isVolatile()) 1753 return false; 1754 if (RelBegin == 0 && RelEnd == Size) 1755 WholeAllocaOp = true; 1756 if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType())) { 1757 if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy)) 1758 return false; 1759 } else if (RelBegin != 0 || RelEnd != Size || 1760 !canConvertValue(DL, AllocaTy, LI->getType())) { 1761 // Non-integer loads need to be convertible from the alloca type so that 1762 // they are promotable. 1763 return false; 1764 } 1765 } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) { 1766 Type *ValueTy = SI->getValueOperand()->getType(); 1767 if (SI->isVolatile()) 1768 return false; 1769 if (RelBegin == 0 && RelEnd == Size) 1770 WholeAllocaOp = true; 1771 if (IntegerType *ITy = dyn_cast<IntegerType>(ValueTy)) { 1772 if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy)) 1773 return false; 1774 } else if (RelBegin != 0 || RelEnd != Size || 1775 !canConvertValue(DL, ValueTy, AllocaTy)) { 1776 // Non-integer stores need to be convertible to the alloca type so that 1777 // they are promotable.
1778 return false; 1779 } 1780 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) { 1781 if (MI->isVolatile() || !isa<Constant>(MI->getLength())) 1782 return false; 1783 if (!I->isSplittable()) 1784 return false; // Skip any unsplittable intrinsics. 1785 } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) { 1786 if (II->getIntrinsicID() != Intrinsic::lifetime_start && 1787 II->getIntrinsicID() != Intrinsic::lifetime_end) 1788 return false; 1789 } else { 1790 return false; 1791 } 1792 1793 return true; 1794} 1795 1796/// \brief Test whether the given alloca partition's integer operations can be 1797/// widened to promotable ones. 1798/// 1799/// This is a quick test to check whether we can rewrite the integer loads and 1800/// stores to a particular alloca into wider loads and stores and be able to 1801/// promote the resulting alloca. 1802static bool 1803isIntegerWideningViable(const DataLayout &DL, Type *AllocaTy, 1804 uint64_t AllocBeginOffset, AllocaSlices &S, 1805 AllocaSlices::const_iterator I, 1806 AllocaSlices::const_iterator E, 1807 ArrayRef<AllocaSlices::iterator> SplitUses) { 1808 uint64_t SizeInBits = DL.getTypeSizeInBits(AllocaTy); 1809 // Don't create integer types larger than the maximum bitwidth. 1810 if (SizeInBits > IntegerType::MAX_INT_BITS) 1811 return false; 1812 1813 // Don't try to handle allocas with bit-padding. 1814 if (SizeInBits != DL.getTypeStoreSizeInBits(AllocaTy)) 1815 return false; 1816 1817 // We need to ensure that an integer type with the appropriate bitwidth can 1818 // be converted to the alloca type, whatever that is. We don't want to force 1819 // the alloca itself to have an integer type if there is a more suitable one. 1820 Type *IntTy = Type::getIntNTy(AllocaTy->getContext(), SizeInBits); 1821 if (!canConvertValue(DL, AllocaTy, IntTy) || 1822 !canConvertValue(DL, IntTy, AllocaTy)) 1823 return false; 1824 1825 uint64_t Size = DL.getTypeStoreSize(AllocaTy); 1826 1827 // While examining uses, we ensure that the alloca has a covering load or 1828 // store. We don't want to widen the integer operations only to fail to 1829 // promote due to some other unsplittable entry (which we may make splittable 1830 // later). However, if there are only splittable uses, go ahead and assume 1831 // that we cover the alloca. 1832 bool WholeAllocaOp = (I != E) ? 
false : DL.isLegalInteger(SizeInBits); 1833 1834 for (; I != E; ++I) 1835 if (!isIntegerWideningViableForSlice(DL, AllocaTy, AllocBeginOffset, Size, 1836 S, I, WholeAllocaOp)) 1837 return false; 1838 1839 for (ArrayRef<AllocaSlices::iterator>::const_iterator SUI = SplitUses.begin(), 1840 SUE = SplitUses.end(); 1841 SUI != SUE; ++SUI) 1842 if (!isIntegerWideningViableForSlice(DL, AllocaTy, AllocBeginOffset, Size, 1843 S, *SUI, WholeAllocaOp)) 1844 return false; 1845 1846 return WholeAllocaOp; 1847} 1848 1849static Value *extractInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *V, 1850 IntegerType *Ty, uint64_t Offset, 1851 const Twine &Name) { 1852 DEBUG(dbgs() << " start: " << *V << "\n"); 1853 IntegerType *IntTy = cast<IntegerType>(V->getType()); 1854 assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) && 1855 "Element extends past full value"); 1856 uint64_t ShAmt = 8*Offset; 1857 if (DL.isBigEndian()) 1858 ShAmt = 8*(DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset); 1859 if (ShAmt) { 1860 V = IRB.CreateLShr(V, ShAmt, Name + ".shift"); 1861 DEBUG(dbgs() << " shifted: " << *V << "\n"); 1862 } 1863 assert(Ty->getBitWidth() <= IntTy->getBitWidth() && 1864 "Cannot extract to a larger integer!"); 1865 if (Ty != IntTy) { 1866 V = IRB.CreateTrunc(V, Ty, Name + ".trunc"); 1867 DEBUG(dbgs() << " trunced: " << *V << "\n"); 1868 } 1869 return V; 1870} 1871 1872static Value *insertInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *Old, 1873 Value *V, uint64_t Offset, const Twine &Name) { 1874 IntegerType *IntTy = cast<IntegerType>(Old->getType()); 1875 IntegerType *Ty = cast<IntegerType>(V->getType()); 1876 assert(Ty->getBitWidth() <= IntTy->getBitWidth() && 1877 "Cannot insert a larger integer!"); 1878 DEBUG(dbgs() << " start: " << *V << "\n"); 1879 if (Ty != IntTy) { 1880 V = IRB.CreateZExt(V, IntTy, Name + ".ext"); 1881 DEBUG(dbgs() << " extended: " << *V << "\n"); 1882 } 1883 assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) && 1884 "Element store outside of alloca store"); 1885 uint64_t ShAmt = 8*Offset; 1886 if (DL.isBigEndian()) 1887 ShAmt = 8*(DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset); 1888 if (ShAmt) { 1889 V = IRB.CreateShl(V, ShAmt, Name + ".shift"); 1890 DEBUG(dbgs() << " shifted: " << *V << "\n"); 1891 } 1892 1893 if (ShAmt || Ty->getBitWidth() < IntTy->getBitWidth()) { 1894 APInt Mask = ~Ty->getMask().zext(IntTy->getBitWidth()).shl(ShAmt); 1895 Old = IRB.CreateAnd(Old, Mask, Name + ".mask"); 1896 DEBUG(dbgs() << " masked: " << *Old << "\n"); 1897 V = IRB.CreateOr(Old, V, Name + ".insert"); 1898 DEBUG(dbgs() << " inserted: " << *V << "\n"); 1899 } 1900 return V; 1901} 1902 1903static Value *extractVector(IRBuilderTy &IRB, Value *V, 1904 unsigned BeginIndex, unsigned EndIndex, 1905 const Twine &Name) { 1906 VectorType *VecTy = cast<VectorType>(V->getType()); 1907 unsigned NumElements = EndIndex - BeginIndex; 1908 assert(NumElements <= VecTy->getNumElements() && "Too many elements!"); 1909 1910 if (NumElements == VecTy->getNumElements()) 1911 return V; 1912 1913 if (NumElements == 1) { 1914 V = IRB.CreateExtractElement(V, IRB.getInt32(BeginIndex), 1915 Name + ".extract"); 1916 DEBUG(dbgs() << " extract: " << *V << "\n"); 1917 return V; 1918 } 1919 1920 SmallVector<Constant*, 8> Mask; 1921 Mask.reserve(NumElements); 1922 for (unsigned i = BeginIndex; i != EndIndex; ++i) 1923 Mask.push_back(IRB.getInt32(i)); 1924 V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()), 1925 ConstantVector::get(Mask), 1926 
Name + ".extract"); 1927 DEBUG(dbgs() << " shuffle: " << *V << "\n"); 1928 return V; 1929} 1930 1931static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V, 1932 unsigned BeginIndex, const Twine &Name) { 1933 VectorType *VecTy = cast<VectorType>(Old->getType()); 1934 assert(VecTy && "Can only insert a vector into a vector"); 1935 1936 VectorType *Ty = dyn_cast<VectorType>(V->getType()); 1937 if (!Ty) { 1938 // Single element to insert. 1939 V = IRB.CreateInsertElement(Old, V, IRB.getInt32(BeginIndex), 1940 Name + ".insert"); 1941 DEBUG(dbgs() << " insert: " << *V << "\n"); 1942 return V; 1943 } 1944 1945 assert(Ty->getNumElements() <= VecTy->getNumElements() && 1946 "Too many elements!"); 1947 if (Ty->getNumElements() == VecTy->getNumElements()) { 1948 assert(V->getType() == VecTy && "Vector type mismatch"); 1949 return V; 1950 } 1951 unsigned EndIndex = BeginIndex + Ty->getNumElements(); 1952 1953 // When inserting a smaller vector into the larger to store, we first 1954 // use a shuffle vector to widen it with undef elements, and then 1955 // a second shuffle vector to select between the loaded vector and the 1956 // incoming vector. 1957 SmallVector<Constant*, 8> Mask; 1958 Mask.reserve(VecTy->getNumElements()); 1959 for (unsigned i = 0; i != VecTy->getNumElements(); ++i) 1960 if (i >= BeginIndex && i < EndIndex) 1961 Mask.push_back(IRB.getInt32(i - BeginIndex)); 1962 else 1963 Mask.push_back(UndefValue::get(IRB.getInt32Ty())); 1964 V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()), 1965 ConstantVector::get(Mask), 1966 Name + ".expand"); 1967 DEBUG(dbgs() << " shuffle: " << *V << "\n"); 1968 1969 Mask.clear(); 1970 for (unsigned i = 0; i != VecTy->getNumElements(); ++i) 1971 Mask.push_back(IRB.getInt1(i >= BeginIndex && i < EndIndex)); 1972 1973 V = IRB.CreateSelect(ConstantVector::get(Mask), V, Old, Name + "blend"); 1974 1975 DEBUG(dbgs() << " blend: " << *V << "\n"); 1976 return V; 1977} 1978 1979namespace { 1980/// \brief Visitor to rewrite instructions using p particular slice of an alloca 1981/// to use a new alloca. 1982/// 1983/// Also implements the rewriting to vector-based accesses when the partition 1984/// passes the isVectorPromotionViable predicate. Most of the rewriting logic 1985/// lives here. 1986class AllocaSliceRewriter : public InstVisitor<AllocaSliceRewriter, bool> { 1987 // Befriend the base class so it can delegate to private visit methods. 1988 friend class llvm::InstVisitor<AllocaSliceRewriter, bool>; 1989 typedef llvm::InstVisitor<AllocaSliceRewriter, bool> Base; 1990 1991 const DataLayout &DL; 1992 AllocaSlices &S; 1993 SROA &Pass; 1994 AllocaInst &OldAI, &NewAI; 1995 const uint64_t NewAllocaBeginOffset, NewAllocaEndOffset; 1996 Type *NewAllocaTy; 1997 1998 // If we are rewriting an alloca partition which can be written as pure 1999 // vector operations, we stash extra information here. When VecTy is 2000 // non-null, we have some strict guarantees about the rewritten alloca: 2001 // - The new alloca is exactly the size of the vector type here. 2002 // - The accesses all either map to the entire vector or to a single 2003 // element. 2004 // - The set of accessing instructions is only one of those handled above 2005 // in isVectorPromotionViable. Generally these are the same access kinds 2006 // which are promotable via mem2reg. 
2007 VectorType *VecTy; 2008 Type *ElementTy; 2009 uint64_t ElementSize; 2010 2011 // This is a convenience and flag variable that will be null unless the new 2012 // alloca's integer operations should be widened to this integer type due to 2013 // passing isIntegerWideningViable above. If it is non-null, the desired 2014 // integer type will be stored here for easy access during rewriting. 2015 IntegerType *IntTy; 2016 2017 // The original offset of the slice currently being rewritten relative to 2018 // the original alloca. 2019 uint64_t BeginOffset, EndOffset; 2020 // The new offsets of the slice currently being rewritten relative to the 2021 // original alloca. 2022 uint64_t NewBeginOffset, NewEndOffset; 2023 2024 uint64_t SliceSize; 2025 bool IsSplittable; 2026 bool IsSplit; 2027 Use *OldUse; 2028 Instruction *OldPtr; 2029 2030 // Track post-rewrite users which are PHI nodes and Selects. 2031 SmallPtrSetImpl<PHINode *> &PHIUsers; 2032 SmallPtrSetImpl<SelectInst *> &SelectUsers; 2033 2034 // Utility IR builder, whose name prefix is set up for each visited use, and 2035 // the insertion point is set to point to the user. 2036 IRBuilderTy IRB; 2037 2038public: 2039 AllocaSliceRewriter(const DataLayout &DL, AllocaSlices &S, SROA &Pass, 2040 AllocaInst &OldAI, AllocaInst &NewAI, 2041 uint64_t NewAllocaBeginOffset, 2042 uint64_t NewAllocaEndOffset, bool IsVectorPromotable, 2043 bool IsIntegerPromotable, 2044 SmallPtrSetImpl<PHINode *> &PHIUsers, 2045 SmallPtrSetImpl<SelectInst *> &SelectUsers) 2046 : DL(DL), S(S), Pass(Pass), OldAI(OldAI), NewAI(NewAI), 2047 NewAllocaBeginOffset(NewAllocaBeginOffset), 2048 NewAllocaEndOffset(NewAllocaEndOffset), 2049 NewAllocaTy(NewAI.getAllocatedType()), 2050 VecTy(IsVectorPromotable ? cast<VectorType>(NewAllocaTy) : nullptr), 2051 ElementTy(VecTy ? VecTy->getElementType() : nullptr), 2052 ElementSize(VecTy ? DL.getTypeSizeInBits(ElementTy) / 8 : 0), 2053 IntTy(IsIntegerPromotable 2054 ? Type::getIntNTy( 2055 NewAI.getContext(), 2056 DL.getTypeSizeInBits(NewAI.getAllocatedType())) 2057 : nullptr), 2058 BeginOffset(), EndOffset(), IsSplittable(), IsSplit(), OldUse(), 2059 OldPtr(), PHIUsers(PHIUsers), SelectUsers(SelectUsers), 2060 IRB(NewAI.getContext(), ConstantFolder()) { 2061 if (VecTy) { 2062 assert((DL.getTypeSizeInBits(ElementTy) % 8) == 0 && 2063 "Only multiple-of-8 sized vector elements are viable"); 2064 ++NumVectorized; 2065 } 2066 assert((!IsVectorPromotable && !IsIntegerPromotable) || 2067 IsVectorPromotable != IsIntegerPromotable); 2068 } 2069 2070 bool visit(AllocaSlices::const_iterator I) { 2071 bool CanSROA = true; 2072 BeginOffset = I->beginOffset(); 2073 EndOffset = I->endOffset(); 2074 IsSplittable = I->isSplittable(); 2075 IsSplit = 2076 BeginOffset < NewAllocaBeginOffset || EndOffset > NewAllocaEndOffset; 2077 2078 // Compute the intersecting offset range. 2079 assert(BeginOffset < NewAllocaEndOffset); 2080 assert(EndOffset > NewAllocaBeginOffset); 2081 NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset); 2082 NewEndOffset = std::min(EndOffset, NewAllocaEndOffset); 2083 2084 SliceSize = NewEndOffset - NewBeginOffset; 2085 2086 OldUse = I->getUse(); 2087 OldPtr = cast<Instruction>(OldUse->get()); 2088 2089 Instruction *OldUserI = cast<Instruction>(OldUse->getUser()); 2090 IRB.SetInsertPoint(OldUserI); 2091 IRB.SetCurrentDebugLocation(OldUserI->getDebugLoc()); 2092 IRB.SetNamePrefix(Twine(NewAI.getName()) + "."
+ Twine(BeginOffset) + "."); 2093 2094 CanSROA &= visit(cast<Instruction>(OldUse->getUser())); 2095 if (VecTy || IntTy) 2096 assert(CanSROA); 2097 return CanSROA; 2098 } 2099 2100private: 2101 // Make sure the other visit overloads are visible. 2102 using Base::visit; 2103 2104 // Every instruction which can end up as a user must have a rewrite rule. 2105 bool visitInstruction(Instruction &I) { 2106 DEBUG(dbgs() << " !!!! Cannot rewrite: " << I << "\n"); 2107 llvm_unreachable("No rewrite rule for this instruction!"); 2108 } 2109 2110 Value *getNewAllocaSlicePtr(IRBuilderTy &IRB, Type *PointerTy) { 2111 // Note that the offset computation can use BeginOffset or NewBeginOffset 2112 // interchangeably for unsplit slices. 2113 assert(IsSplit || BeginOffset == NewBeginOffset); 2114 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; 2115 2116#ifndef NDEBUG 2117 StringRef OldName = OldPtr->getName(); 2118 // Skip through the last '.sroa.' component of the name. 2119 size_t LastSROAPrefix = OldName.rfind(".sroa."); 2120 if (LastSROAPrefix != StringRef::npos) { 2121 OldName = OldName.substr(LastSROAPrefix + strlen(".sroa.")); 2122 // Look for an SROA slice index. 2123 size_t IndexEnd = OldName.find_first_not_of("0123456789"); 2124 if (IndexEnd != StringRef::npos && OldName[IndexEnd] == '.') { 2125 // Strip the index and look for the offset. 2126 OldName = OldName.substr(IndexEnd + 1); 2127 size_t OffsetEnd = OldName.find_first_not_of("0123456789"); 2128 if (OffsetEnd != StringRef::npos && OldName[OffsetEnd] == '.') 2129 // Strip the offset. 2130 OldName = OldName.substr(OffsetEnd + 1); 2131 } 2132 } 2133 // Strip any SROA suffixes as well. 2134 OldName = OldName.substr(0, OldName.find(".sroa_")); 2135#endif 2136 2137 return getAdjustedPtr(IRB, DL, &NewAI, 2138 APInt(DL.getPointerSizeInBits(), Offset), PointerTy, 2139#ifndef NDEBUG 2140 Twine(OldName) + "." 2141#else 2142 Twine() 2143#endif 2144 ); 2145 } 2146 2147 /// \brief Compute suitable alignment to access this slice of the *new* alloca. 2148 /// 2149 /// You can optionally pass a type to this routine and if that type's ABI 2150 /// alignment is itself suitable, this will return zero. 2151 unsigned getSliceAlign(Type *Ty = nullptr) { 2152 unsigned NewAIAlign = NewAI.getAlignment(); 2153 if (!NewAIAlign) 2154 NewAIAlign = DL.getABITypeAlignment(NewAI.getAllocatedType()); 2155 unsigned Align = MinAlign(NewAIAlign, NewBeginOffset - NewAllocaBeginOffset); 2156 return (Ty && Align == DL.getABITypeAlignment(Ty)) ? 
0 : Align; 2157 } 2158 2159 unsigned getIndex(uint64_t Offset) { 2160 assert(VecTy && "Can only call getIndex when rewriting a vector"); 2161 uint64_t RelOffset = Offset - NewAllocaBeginOffset; 2162 assert(RelOffset / ElementSize < UINT32_MAX && "Index out of bounds"); 2163 uint32_t Index = RelOffset / ElementSize; 2164 assert(Index * ElementSize == RelOffset); 2165 return Index; 2166 } 2167 2168 void deleteIfTriviallyDead(Value *V) { 2169 Instruction *I = cast<Instruction>(V); 2170 if (isInstructionTriviallyDead(I)) 2171 Pass.DeadInsts.insert(I); 2172 } 2173 2174 Value *rewriteVectorizedLoadInst() { 2175 unsigned BeginIndex = getIndex(NewBeginOffset); 2176 unsigned EndIndex = getIndex(NewEndOffset); 2177 assert(EndIndex > BeginIndex && "Empty vector!"); 2178 2179 Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), 2180 "load"); 2181 return extractVector(IRB, V, BeginIndex, EndIndex, "vec"); 2182 } 2183 2184 Value *rewriteIntegerLoad(LoadInst &LI) { 2185 assert(IntTy && "We cannot insert an integer to the alloca"); 2186 assert(!LI.isVolatile()); 2187 Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), 2188 "load"); 2189 V = convertValue(DL, IRB, V, IntTy); 2190 assert(NewBeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); 2191 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; 2192 if (Offset > 0 || NewEndOffset < NewAllocaEndOffset) 2193 V = extractInteger(DL, IRB, V, cast<IntegerType>(LI.getType()), Offset, 2194 "extract"); 2195 return V; 2196 } 2197 2198 bool visitLoadInst(LoadInst &LI) { 2199 DEBUG(dbgs() << " original: " << LI << "\n"); 2200 Value *OldOp = LI.getOperand(0); 2201 assert(OldOp == OldPtr); 2202 2203 Type *TargetTy = IsSplit ? Type::getIntNTy(LI.getContext(), SliceSize * 8) 2204 : LI.getType(); 2205 bool IsPtrAdjusted = false; 2206 Value *V; 2207 if (VecTy) { 2208 V = rewriteVectorizedLoadInst(); 2209 } else if (IntTy && LI.getType()->isIntegerTy()) { 2210 V = rewriteIntegerLoad(LI); 2211 } else if (NewBeginOffset == NewAllocaBeginOffset && 2212 canConvertValue(DL, NewAllocaTy, LI.getType())) { 2213 V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), 2214 LI.isVolatile(), LI.getName()); 2215 } else { 2216 Type *LTy = TargetTy->getPointerTo(); 2217 V = IRB.CreateAlignedLoad(getNewAllocaSlicePtr(IRB, LTy), 2218 getSliceAlign(TargetTy), LI.isVolatile(), 2219 LI.getName()); 2220 IsPtrAdjusted = true; 2221 } 2222 V = convertValue(DL, IRB, V, TargetTy); 2223 2224 if (IsSplit) { 2225 assert(!LI.isVolatile()); 2226 assert(LI.getType()->isIntegerTy() && 2227 "Only integer type loads and stores are split"); 2228 assert(SliceSize < DL.getTypeStoreSize(LI.getType()) && 2229 "Split load isn't smaller than original load"); 2230 assert(LI.getType()->getIntegerBitWidth() == 2231 DL.getTypeStoreSizeInBits(LI.getType()) && 2232 "Non-byte-multiple bit width"); 2233 // Move the insertion point just past the load so that we can refer to it. 2234 IRB.SetInsertPoint(std::next(BasicBlock::iterator(&LI))); 2235 // Create a placeholder value with the same type as LI to use as the 2236 // basis for the new value. This allows us to replace the uses of LI with 2237 // the computed value, and then replace the placeholder with LI, leaving 2238 // LI only used for this computation. 
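      // Illustrative note (added commentary, not part of the original
      // source): if LI is "%wide = load i64* %p" and this slice supplies 32
      // of its bits in V, insertInteger masks the placeholder wide value and
      // ORs V into it; the uses of %wide are redirected to that merged
      // value, and only then is the placeholder swapped back for %wide.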
2239 Value *Placeholder 2240 = new LoadInst(UndefValue::get(LI.getType()->getPointerTo())); 2241 V = insertInteger(DL, IRB, Placeholder, V, NewBeginOffset, 2242 "insert"); 2243 LI.replaceAllUsesWith(V); 2244 Placeholder->replaceAllUsesWith(&LI); 2245 delete Placeholder; 2246 } else { 2247 LI.replaceAllUsesWith(V); 2248 } 2249 2250 Pass.DeadInsts.insert(&LI); 2251 deleteIfTriviallyDead(OldOp); 2252 DEBUG(dbgs() << " to: " << *V << "\n"); 2253 return !LI.isVolatile() && !IsPtrAdjusted; 2254 } 2255 2256 bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp) { 2257 if (V->getType() != VecTy) { 2258 unsigned BeginIndex = getIndex(NewBeginOffset); 2259 unsigned EndIndex = getIndex(NewEndOffset); 2260 assert(EndIndex > BeginIndex && "Empty vector!"); 2261 unsigned NumElements = EndIndex - BeginIndex; 2262 assert(NumElements <= VecTy->getNumElements() && "Too many elements!"); 2263 Type *SliceTy = 2264 (NumElements == 1) ? ElementTy 2265 : VectorType::get(ElementTy, NumElements); 2266 if (V->getType() != SliceTy) 2267 V = convertValue(DL, IRB, V, SliceTy); 2268 2269 // Mix in the existing elements. 2270 Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), 2271 "load"); 2272 V = insertVector(IRB, Old, V, BeginIndex, "vec"); 2273 } 2274 StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment()); 2275 Pass.DeadInsts.insert(&SI); 2276 2277 (void)Store; 2278 DEBUG(dbgs() << " to: " << *Store << "\n"); 2279 return true; 2280 } 2281 2282 bool rewriteIntegerStore(Value *V, StoreInst &SI) { 2283 assert(IntTy && "We cannot extract an integer from the alloca"); 2284 assert(!SI.isVolatile()); 2285 if (DL.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) { 2286 Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), 2287 "oldload"); 2288 Old = convertValue(DL, IRB, Old, IntTy); 2289 assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); 2290 uint64_t Offset = BeginOffset - NewAllocaBeginOffset; 2291 V = insertInteger(DL, IRB, Old, SI.getValueOperand(), Offset, 2292 "insert"); 2293 } 2294 V = convertValue(DL, IRB, V, NewAllocaTy); 2295 StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment()); 2296 Pass.DeadInsts.insert(&SI); 2297 (void)Store; 2298 DEBUG(dbgs() << " to: " << *Store << "\n"); 2299 return true; 2300 } 2301 2302 bool visitStoreInst(StoreInst &SI) { 2303 DEBUG(dbgs() << " original: " << SI << "\n"); 2304 Value *OldOp = SI.getOperand(1); 2305 assert(OldOp == OldPtr); 2306 2307 Value *V = SI.getValueOperand(); 2308 2309 // Strip all inbounds GEPs and pointer casts to try to dig out any root 2310 // alloca that should be re-examined after promoting this alloca. 
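    // Illustrative example (added commentary, not part of the original
    // source): if the stored value is
    //   %p = getelementptr inbounds [4 x i32]* %other, i32 0, i32 0
    // then stripInBoundsOffsets walks back to the %other alloca, which is
    // queued so it can be revisited once this store has been rewritten.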
2311 if (V->getType()->isPointerTy()) 2312 if (AllocaInst *AI = dyn_cast<AllocaInst>(V->stripInBoundsOffsets())) 2313 Pass.PostPromotionWorklist.insert(AI); 2314 2315 if (SliceSize < DL.getTypeStoreSize(V->getType())) { 2316 assert(!SI.isVolatile()); 2317 assert(V->getType()->isIntegerTy() && 2318 "Only integer type loads and stores are split"); 2319 assert(V->getType()->getIntegerBitWidth() == 2320 DL.getTypeStoreSizeInBits(V->getType()) && 2321 "Non-byte-multiple bit width"); 2322 IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), SliceSize * 8); 2323 V = extractInteger(DL, IRB, V, NarrowTy, NewBeginOffset, 2324 "extract"); 2325 } 2326 2327 if (VecTy) 2328 return rewriteVectorizedStoreInst(V, SI, OldOp); 2329 if (IntTy && V->getType()->isIntegerTy()) 2330 return rewriteIntegerStore(V, SI); 2331 2332 StoreInst *NewSI; 2333 if (NewBeginOffset == NewAllocaBeginOffset && 2334 NewEndOffset == NewAllocaEndOffset && 2335 canConvertValue(DL, V->getType(), NewAllocaTy)) { 2336 V = convertValue(DL, IRB, V, NewAllocaTy); 2337 NewSI = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(), 2338 SI.isVolatile()); 2339 } else { 2340 Value *NewPtr = getNewAllocaSlicePtr(IRB, V->getType()->getPointerTo()); 2341 NewSI = IRB.CreateAlignedStore(V, NewPtr, getSliceAlign(V->getType()), 2342 SI.isVolatile()); 2343 } 2344 (void)NewSI; 2345 Pass.DeadInsts.insert(&SI); 2346 deleteIfTriviallyDead(OldOp); 2347 2348 DEBUG(dbgs() << " to: " << *NewSI << "\n"); 2349 return NewSI->getPointerOperand() == &NewAI && !SI.isVolatile(); 2350 } 2351 2352 /// \brief Compute an integer value from splatting an i8 across the given 2353 /// number of bytes. 2354 /// 2355 /// Note that this routine assumes an i8 is a byte. If that isn't true, don't 2356 /// call this routine. 2357 /// FIXME: Heed the advice above. 2358 /// 2359 /// \param V The i8 value to splat. 2360 /// \param Size The number of bytes in the output (assuming i8 is one byte) 2361 Value *getIntegerSplat(Value *V, unsigned Size) { 2362 assert(Size > 0 && "Expected a positive number of bytes."); 2363 IntegerType *VTy = cast<IntegerType>(V->getType()); 2364 assert(VTy->getBitWidth() == 8 && "Expected an i8 value for the byte"); 2365 if (Size == 1) 2366 return V; 2367 2368 Type *SplatIntTy = Type::getIntNTy(VTy->getContext(), Size*8); 2369 V = IRB.CreateMul(IRB.CreateZExt(V, SplatIntTy, "zext"), 2370 ConstantExpr::getUDiv( 2371 Constant::getAllOnesValue(SplatIntTy), 2372 ConstantExpr::getZExt( 2373 Constant::getAllOnesValue(V->getType()), 2374 SplatIntTy)), 2375 "isplat"); 2376 return V; 2377 } 2378 2379 /// \brief Compute a vector splat for a given element value. 2380 Value *getVectorSplat(Value *V, unsigned NumElements) { 2381 V = IRB.CreateVectorSplat(NumElements, V, "vsplat"); 2382 DEBUG(dbgs() << " splat: " << *V << "\n"); 2383 return V; 2384 } 2385 2386 bool visitMemSetInst(MemSetInst &II) { 2387 DEBUG(dbgs() << " original: " << II << "\n"); 2388 assert(II.getRawDest() == OldPtr); 2389 2390 // If the memset has a variable size, it cannot be split, just adjust the 2391 // pointer to the new alloca. 2392 if (!isa<Constant>(II.getLength())) { 2393 assert(!IsSplit); 2394 assert(NewBeginOffset == BeginOffset); 2395 II.setDest(getNewAllocaSlicePtr(IRB, OldPtr->getType())); 2396 Type *CstTy = II.getAlignmentCst()->getType(); 2397 II.setAlignment(ConstantInt::get(CstTy, getSliceAlign())); 2398 2399 deleteIfTriviallyDead(OldPtr); 2400 return false; 2401 } 2402 2403 // Record this instruction for deletion. 
2404 Pass.DeadInsts.insert(&II); 2405 2406 Type *AllocaTy = NewAI.getAllocatedType(); 2407 Type *ScalarTy = AllocaTy->getScalarType(); 2408 2409 // If this doesn't map cleanly onto the alloca type, and that type isn't 2410 // a single value type, just emit a memset. 2411 if (!VecTy && !IntTy && 2412 (BeginOffset > NewAllocaBeginOffset || 2413 EndOffset < NewAllocaEndOffset || 2414 !AllocaTy->isSingleValueType() || 2415 !DL.isLegalInteger(DL.getTypeSizeInBits(ScalarTy)) || 2416 DL.getTypeSizeInBits(ScalarTy)%8 != 0)) { 2417 Type *SizeTy = II.getLength()->getType(); 2418 Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset); 2419 CallInst *New = IRB.CreateMemSet( 2420 getNewAllocaSlicePtr(IRB, OldPtr->getType()), II.getValue(), Size, 2421 getSliceAlign(), II.isVolatile()); 2422 (void)New; 2423 DEBUG(dbgs() << "  to: " << *New << "\n"); 2424 return false; 2425 } 2426 2427 // If we can represent this as a simple value, we have to build the actual 2428 // value to store, which requires expanding the byte present in memset to 2429 // a sensible representation for the alloca type. This is essentially 2430 // splatting the byte to a sufficiently wide integer, splatting it across 2431 // any desired vector width, and bitcasting to the final type. 2432 Value *V; 2433 2434 if (VecTy) { 2435 // If this is a memset of a vectorized alloca, insert it. 2436 assert(ElementTy == ScalarTy); 2437 2438 unsigned BeginIndex = getIndex(NewBeginOffset); 2439 unsigned EndIndex = getIndex(NewEndOffset); 2440 assert(EndIndex > BeginIndex && "Empty vector!"); 2441 unsigned NumElements = EndIndex - BeginIndex; 2442 assert(NumElements <= VecTy->getNumElements() && "Too many elements!"); 2443 2444 Value *Splat = 2445 getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ElementTy) / 8); 2446 Splat = convertValue(DL, IRB, Splat, ElementTy); 2447 if (NumElements > 1) 2448 Splat = getVectorSplat(Splat, NumElements); 2449 2450 Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), 2451 "oldload"); 2452 V = insertVector(IRB, Old, Splat, BeginIndex, "vec"); 2453 } else if (IntTy) { 2454 // If this is a memset on an alloca where we can widen stores, insert the 2455 // set integer. 2456 assert(!II.isVolatile()); 2457 2458 uint64_t Size = NewEndOffset - NewBeginOffset; 2459 V = getIntegerSplat(II.getValue(), Size); 2460 2461 if (IntTy && (BeginOffset != NewAllocaBeginOffset || 2462 EndOffset != NewAllocaEndOffset)) { 2463 Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), 2464 "oldload"); 2465 Old = convertValue(DL, IRB, Old, IntTy); 2466 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; 2467 V = insertInteger(DL, IRB, Old, V, Offset, "insert"); 2468 } else { 2469 assert(V->getType() == IntTy && 2470 "Wrong type for an alloca wide integer!"); 2471 } 2472 V = convertValue(DL, IRB, V, AllocaTy); 2473 } else { 2474 // Established these invariants above.
2475 assert(NewBeginOffset == NewAllocaBeginOffset); 2476 assert(NewEndOffset == NewAllocaEndOffset); 2477 2478 V = getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ScalarTy) / 8); 2479 if (VectorType *AllocaVecTy = dyn_cast<VectorType>(AllocaTy)) 2480 V = getVectorSplat(V, AllocaVecTy->getNumElements()); 2481 2482 V = convertValue(DL, IRB, V, AllocaTy); 2483 } 2484 2485 Value *New = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(), 2486 II.isVolatile()); 2487 (void)New; 2488 DEBUG(dbgs() << "  to: " << *New << "\n"); 2489 return !II.isVolatile(); 2490 } 2491 2492 bool visitMemTransferInst(MemTransferInst &II) { 2493 // Rewriting of memory transfer instructions can be a bit tricky. We break 2494 // them into two categories: split intrinsics and unsplit intrinsics. 2495 2496 DEBUG(dbgs() << "  original: " << II << "\n"); 2497 2498 bool IsDest = &II.getRawDestUse() == OldUse; 2499 assert((IsDest && II.getRawDest() == OldPtr) || 2500 (!IsDest && II.getRawSource() == OldPtr)); 2501 2502 unsigned SliceAlign = getSliceAlign(); 2503 2504 // For unsplit intrinsics, we simply modify the source and destination 2505 // pointers in place. This isn't just an optimization, it is a matter of 2506 // correctness. With unsplit intrinsics we may be dealing with transfers 2507 // within a single alloca before SROA ran, or with transfers that have 2508 // a variable length. We may also be dealing with memmove instead of 2509 // memcpy, and so simply updating the pointers is necessary for us to 2510 // update both source and dest of a single call. 2511 if (!IsSplittable) { 2512 Value *AdjustedPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); 2513 if (IsDest) 2514 II.setDest(AdjustedPtr); 2515 else 2516 II.setSource(AdjustedPtr); 2517 2518 if (II.getAlignment() > SliceAlign) { 2519 Type *CstTy = II.getAlignmentCst()->getType(); 2520 II.setAlignment( 2521 ConstantInt::get(CstTy, MinAlign(II.getAlignment(), SliceAlign))); 2522 } 2523 2524 DEBUG(dbgs() << "  to: " << II << "\n"); 2525 deleteIfTriviallyDead(OldPtr); 2526 return false; 2527 } 2528 // For split transfer intrinsics we have an incredibly useful assurance: 2529 // the source and destination do not reside within the same alloca, and at 2530 // least one of them does not escape. This means that we can replace 2531 // memmove with memcpy, and we don't need to worry about all manner of 2532 // downsides to splitting and transforming the operations. 2533 2534 // If this doesn't map cleanly onto the alloca type, and that type isn't 2535 // a single value type, just emit a memcpy. 2536 bool EmitMemCpy 2537 = !VecTy && !IntTy && (BeginOffset > NewAllocaBeginOffset || 2538 EndOffset < NewAllocaEndOffset || 2539 !NewAI.getAllocatedType()->isSingleValueType()); 2540 2541 // If we're just going to emit a memcpy, the alloca hasn't changed, and the 2542 // size hasn't been shrunk based on analysis of the viable range, this is 2543 // a no-op. 2544 if (EmitMemCpy && &OldAI == &NewAI) { 2545 // Ensure the start lines up. 2546 assert(NewBeginOffset == BeginOffset); 2547 2548 // Rewrite the size as needed. 2549 if (NewEndOffset != EndOffset) 2550 II.setLength(ConstantInt::get(II.getLength()->getType(), 2551 NewEndOffset - NewBeginOffset)); 2552 return false; 2553 } 2554 // Record this instruction for deletion. 2555 Pass.DeadInsts.insert(&II); 2556 2557 // Strip all inbounds GEPs and pointer casts to try to dig out any root 2558 // alloca that should be re-examined after rewriting this instruction. 2559 Value *OtherPtr = IsDest ?
II.getRawSource() : II.getRawDest(); 2560 if (AllocaInst *AI 2561 = dyn_cast<AllocaInst>(OtherPtr->stripInBoundsOffsets())) { 2562 assert(AI != &OldAI && AI != &NewAI && 2563 "Splittable transfers cannot reach the same alloca on both ends."); 2564 Pass.Worklist.insert(AI); 2565 } 2566 2567 Type *OtherPtrTy = OtherPtr->getType(); 2568 unsigned OtherAS = OtherPtrTy->getPointerAddressSpace(); 2569 2570 // Compute the relative offset for the other pointer within the transfer. 2571 unsigned IntPtrWidth = DL.getPointerSizeInBits(OtherAS); 2572 APInt OtherOffset(IntPtrWidth, NewBeginOffset - BeginOffset); 2573 unsigned OtherAlign = MinAlign(II.getAlignment() ? II.getAlignment() : 1, 2574 OtherOffset.zextOrTrunc(64).getZExtValue()); 2575 2576 if (EmitMemCpy) { 2577 // Compute the other pointer, folding as much as possible to produce 2578 // a single, simple GEP in most cases. 2579 OtherPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy, 2580 OtherPtr->getName() + "."); 2581 2582 Value *OurPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); 2583 Type *SizeTy = II.getLength()->getType(); 2584 Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset); 2585 2586 CallInst *New = IRB.CreateMemCpy( 2587 IsDest ? OurPtr : OtherPtr, IsDest ? OtherPtr : OurPtr, Size, 2588 MinAlign(SliceAlign, OtherAlign), II.isVolatile()); 2589 (void)New; 2590 DEBUG(dbgs() << " to: " << *New << "\n"); 2591 return false; 2592 } 2593 2594 bool IsWholeAlloca = NewBeginOffset == NewAllocaBeginOffset && 2595 NewEndOffset == NewAllocaEndOffset; 2596 uint64_t Size = NewEndOffset - NewBeginOffset; 2597 unsigned BeginIndex = VecTy ? getIndex(NewBeginOffset) : 0; 2598 unsigned EndIndex = VecTy ? getIndex(NewEndOffset) : 0; 2599 unsigned NumElements = EndIndex - BeginIndex; 2600 IntegerType *SubIntTy 2601 = IntTy ? Type::getIntNTy(IntTy->getContext(), Size*8) : nullptr; 2602 2603 // Reset the other pointer type to match the register type we're going to 2604 // use, but using the address space of the original other pointer. 
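    // Illustrative example (added commentary, not part of the original
    // source): copying two elements of a promoted <4 x float> alloca retypes
    // the other side of the transfer as <2 x float>* (in its original
    // address space) so the loaded or stored register matches the extracted
    // sub-vector.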
2605 if (VecTy && !IsWholeAlloca) { 2606 if (NumElements == 1) 2607 OtherPtrTy = VecTy->getElementType(); 2608 else 2609 OtherPtrTy = VectorType::get(VecTy->getElementType(), NumElements); 2610 2611 OtherPtrTy = OtherPtrTy->getPointerTo(OtherAS); 2612 } else if (IntTy && !IsWholeAlloca) { 2613 OtherPtrTy = SubIntTy->getPointerTo(OtherAS); 2614 } else { 2615 OtherPtrTy = NewAllocaTy->getPointerTo(OtherAS); 2616 } 2617 2618 Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy, 2619 OtherPtr->getName() + "."); 2620 unsigned SrcAlign = OtherAlign; 2621 Value *DstPtr = &NewAI; 2622 unsigned DstAlign = SliceAlign; 2623 if (!IsDest) { 2624 std::swap(SrcPtr, DstPtr); 2625 std::swap(SrcAlign, DstAlign); 2626 } 2627 2628 Value *Src; 2629 if (VecTy && !IsWholeAlloca && !IsDest) { 2630 Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), 2631 "load"); 2632 Src = extractVector(IRB, Src, BeginIndex, EndIndex, "vec"); 2633 } else if (IntTy && !IsWholeAlloca && !IsDest) { 2634 Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), 2635 "load"); 2636 Src = convertValue(DL, IRB, Src, IntTy); 2637 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; 2638 Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract"); 2639 } else { 2640 Src = IRB.CreateAlignedLoad(SrcPtr, SrcAlign, II.isVolatile(), 2641 "copyload"); 2642 } 2643 2644 if (VecTy && !IsWholeAlloca && IsDest) { 2645 Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), 2646 "oldload"); 2647 Src = insertVector(IRB, Old, Src, BeginIndex, "vec"); 2648 } else if (IntTy && !IsWholeAlloca && IsDest) { 2649 Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), 2650 "oldload"); 2651 Old = convertValue(DL, IRB, Old, IntTy); 2652 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; 2653 Src = insertInteger(DL, IRB, Old, Src, Offset, "insert"); 2654 Src = convertValue(DL, IRB, Src, NewAllocaTy); 2655 } 2656 2657 StoreInst *Store = cast<StoreInst>( 2658 IRB.CreateAlignedStore(Src, DstPtr, DstAlign, II.isVolatile())); 2659 (void)Store; 2660 DEBUG(dbgs() << " to: " << *Store << "\n"); 2661 return !II.isVolatile(); 2662 } 2663 2664 bool visitIntrinsicInst(IntrinsicInst &II) { 2665 assert(II.getIntrinsicID() == Intrinsic::lifetime_start || 2666 II.getIntrinsicID() == Intrinsic::lifetime_end); 2667 DEBUG(dbgs() << " original: " << II << "\n"); 2668 assert(II.getArgOperand(1) == OldPtr); 2669 2670 // Record this instruction for deletion. 2671 Pass.DeadInsts.insert(&II); 2672 2673 ConstantInt *Size 2674 = ConstantInt::get(cast<IntegerType>(II.getArgOperand(0)->getType()), 2675 NewEndOffset - NewBeginOffset); 2676 Value *Ptr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); 2677 Value *New; 2678 if (II.getIntrinsicID() == Intrinsic::lifetime_start) 2679 New = IRB.CreateLifetimeStart(Ptr, Size); 2680 else 2681 New = IRB.CreateLifetimeEnd(Ptr, Size); 2682 2683 (void)New; 2684 DEBUG(dbgs() << " to: " << *New << "\n"); 2685 return true; 2686 } 2687 2688 bool visitPHINode(PHINode &PN) { 2689 DEBUG(dbgs() << " original: " << PN << "\n"); 2690 assert(BeginOffset >= NewAllocaBeginOffset && "PHIs are unsplittable"); 2691 assert(EndOffset <= NewAllocaEndOffset && "PHIs are unsplittable"); 2692 2693 // We would like to compute a new pointer in only one place, but have it be 2694 // as local as possible to the PHI. To do that, we re-use the location of 2695 // the old pointer, which necessarily must be in the right position to 2696 // dominate the PHI. 
2697 IRBuilderTy PtrBuilder(IRB); 2698 PtrBuilder.SetInsertPoint(OldPtr); 2699 PtrBuilder.SetCurrentDebugLocation(OldPtr->getDebugLoc()); 2700 2701 Value *NewPtr = getNewAllocaSlicePtr(PtrBuilder, OldPtr->getType()); 2702 // Replace the operands which were using the old pointer. 2703 std::replace(PN.op_begin(), PN.op_end(), cast<Value>(OldPtr), NewPtr); 2704 2705 DEBUG(dbgs() << " to: " << PN << "\n"); 2706 deleteIfTriviallyDead(OldPtr); 2707 2708 // PHIs can't be promoted on their own, but often can be speculated. We 2709 // check the speculation outside of the rewriter so that we see the 2710 // fully-rewritten alloca. 2711 PHIUsers.insert(&PN); 2712 return true; 2713 } 2714 2715 bool visitSelectInst(SelectInst &SI) { 2716 DEBUG(dbgs() << " original: " << SI << "\n"); 2717 assert((SI.getTrueValue() == OldPtr || SI.getFalseValue() == OldPtr) && 2718 "Pointer isn't an operand!"); 2719 assert(BeginOffset >= NewAllocaBeginOffset && "Selects are unsplittable"); 2720 assert(EndOffset <= NewAllocaEndOffset && "Selects are unsplittable"); 2721 2722 Value *NewPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); 2723 // Replace the operands which were using the old pointer. 2724 if (SI.getOperand(1) == OldPtr) 2725 SI.setOperand(1, NewPtr); 2726 if (SI.getOperand(2) == OldPtr) 2727 SI.setOperand(2, NewPtr); 2728 2729 DEBUG(dbgs() << " to: " << SI << "\n"); 2730 deleteIfTriviallyDead(OldPtr); 2731 2732 // Selects can't be promoted on their own, but often can be speculated. We 2733 // check the speculation outside of the rewriter so that we see the 2734 // fully-rewritten alloca. 2735 SelectUsers.insert(&SI); 2736 return true; 2737 } 2738 2739}; 2740} 2741 2742namespace { 2743/// \brief Visitor to rewrite aggregate loads and stores as scalar. 2744/// 2745/// This pass aggressively rewrites all aggregate loads and stores on 2746/// a particular pointer (or any pointer derived from it which we can identify) 2747/// with scalar loads and stores. 2748class AggLoadStoreRewriter : public InstVisitor<AggLoadStoreRewriter, bool> { 2749 // Befriend the base class so it can delegate to private visit methods. 2750 friend class llvm::InstVisitor<AggLoadStoreRewriter, bool>; 2751 2752 const DataLayout &DL; 2753 2754 /// Queue of pointer uses to analyze and potentially rewrite. 2755 SmallVector<Use *, 8> Queue; 2756 2757 /// Set to prevent us from cycling with phi nodes and loops. 2758 SmallPtrSet<User *, 8> Visited; 2759 2760 /// The current pointer use being rewritten. This is used to dig up the used 2761 /// value (as opposed to the user). 2762 Use *U; 2763 2764public: 2765 AggLoadStoreRewriter(const DataLayout &DL) : DL(DL) {} 2766 2767 /// Rewrite loads and stores through a pointer and all pointers derived from 2768 /// it. 2769 bool rewrite(Instruction &I) { 2770 DEBUG(dbgs() << " Rewriting FCA loads and stores...\n"); 2771 enqueueUsers(I); 2772 bool Changed = false; 2773 while (!Queue.empty()) { 2774 U = Queue.pop_back_val(); 2775 Changed |= visit(cast<Instruction>(U->getUser())); 2776 } 2777 return Changed; 2778 } 2779 2780private: 2781 /// Enqueue all the users of the given instruction for further processing. 2782 /// This uses a set to de-duplicate users. 2783 void enqueueUsers(Instruction &I) { 2784 for (Use &U : I.uses()) 2785 if (Visited.insert(U.getUser())) 2786 Queue.push_back(&U); 2787 } 2788 2789 // Conservative default is to not rewrite anything. 2790 bool visitInstruction(Instruction &I) { return false; } 2791 2792 /// \brief Generic recursive split emission class. 
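  /// As an illustrative sketch (added commentary, not part of the original
  /// source): splitting a load of {i32, [2 x float]} recurses depth-first
  /// through the index lists {0}, {1,0}, and {1,1}, and each leaf emits one
  /// scalar GEP plus load (or store) through the derived class's emitFunc.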
2793 template <typename Derived> 2794 class OpSplitter { 2795 protected: 2796 /// The builder used to form new instructions. 2797 IRBuilderTy IRB; 2798 /// The indices which are to be used with insert- or extractvalue to select the 2799 /// appropriate value within the aggregate. 2800 SmallVector<unsigned, 4> Indices; 2801 /// The indices to a GEP instruction which will move Ptr to the correct slot 2802 /// within the aggregate. 2803 SmallVector<Value *, 4> GEPIndices; 2804 /// The base pointer of the original op, used as a base for GEPing the 2805 /// split operations. 2806 Value *Ptr; 2807 2808 /// Initialize the splitter with an insertion point and Ptr, and start with a 2809 /// single zero GEP index. 2810 OpSplitter(Instruction *InsertionPoint, Value *Ptr) 2811 : IRB(InsertionPoint), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr) {} 2812 2813 public: 2814 /// \brief Generic recursive split emission routine. 2815 /// 2816 /// This method recursively splits an aggregate op (load or store) into 2817 /// scalar or vector ops. It splits recursively until it hits a single value 2818 /// and emits that single value operation via the template argument. 2819 /// 2820 /// The logic of this routine relies on GEPs and insertvalue and 2821 /// extractvalue all operating with the same fundamental index list, merely 2822 /// formatted differently (GEPs need actual values). 2823 /// 2824 /// \param Ty The type being split recursively into smaller ops. 2825 /// \param Agg The aggregate value being built up or stored, depending on 2826 /// whether this is splitting a load or a store respectively. 2827 void emitSplitOps(Type *Ty, Value *&Agg, const Twine &Name) { 2828 if (Ty->isSingleValueType()) 2829 return static_cast<Derived *>(this)->emitFunc(Ty, Agg, Name); 2830 2831 if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) { 2832 unsigned OldSize = Indices.size(); 2833 (void)OldSize; 2834 for (unsigned Idx = 0, Size = ATy->getNumElements(); Idx != Size; 2835 ++Idx) { 2836 assert(Indices.size() == OldSize && "Did not return to the old size"); 2837 Indices.push_back(Idx); 2838 GEPIndices.push_back(IRB.getInt32(Idx)); 2839 emitSplitOps(ATy->getElementType(), Agg, Name + "." + Twine(Idx)); 2840 GEPIndices.pop_back(); 2841 Indices.pop_back(); 2842 } 2843 return; 2844 } 2845 2846 if (StructType *STy = dyn_cast<StructType>(Ty)) { 2847 unsigned OldSize = Indices.size(); 2848 (void)OldSize; 2849 for (unsigned Idx = 0, Size = STy->getNumElements(); Idx != Size; 2850 ++Idx) { 2851 assert(Indices.size() == OldSize && "Did not return to the old size"); 2852 Indices.push_back(Idx); 2853 GEPIndices.push_back(IRB.getInt32(Idx)); 2854 emitSplitOps(STy->getElementType(Idx), Agg, Name + "." + Twine(Idx)); 2855 GEPIndices.pop_back(); 2856 Indices.pop_back(); 2857 } 2858 return; 2859 } 2860 2861 llvm_unreachable("Only arrays and structs are aggregate loadable types"); 2862 } 2863 }; 2864 2865 struct LoadOpSplitter : public OpSplitter<LoadOpSplitter> { 2866 LoadOpSplitter(Instruction *InsertionPoint, Value *Ptr) 2867 : OpSplitter<LoadOpSplitter>(InsertionPoint, Ptr) {} 2868 2869 /// Emit a leaf load of a single value. This is called at the leaves of the 2870 /// recursive emission to actually load values. 2871 void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) { 2872 assert(Ty->isSingleValueType()); 2873 // Load the single value and insert it using the indices.
2874 Value *GEP = IRB.CreateInBoundsGEP(Ptr, GEPIndices, Name + ".gep"); 2875 Value *Load = IRB.CreateLoad(GEP, Name + ".load"); 2876 Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert"); 2877 DEBUG(dbgs() << " to: " << *Load << "\n"); 2878 } 2879 }; 2880 2881 bool visitLoadInst(LoadInst &LI) { 2882 assert(LI.getPointerOperand() == *U); 2883 if (!LI.isSimple() || LI.getType()->isSingleValueType()) 2884 return false; 2885 2886 // We have an aggregate being loaded, split it apart. 2887 DEBUG(dbgs() << " original: " << LI << "\n"); 2888 LoadOpSplitter Splitter(&LI, *U); 2889 Value *V = UndefValue::get(LI.getType()); 2890 Splitter.emitSplitOps(LI.getType(), V, LI.getName() + ".fca"); 2891 LI.replaceAllUsesWith(V); 2892 LI.eraseFromParent(); 2893 return true; 2894 } 2895 2896 struct StoreOpSplitter : public OpSplitter<StoreOpSplitter> { 2897 StoreOpSplitter(Instruction *InsertionPoint, Value *Ptr) 2898 : OpSplitter<StoreOpSplitter>(InsertionPoint, Ptr) {} 2899 2900 /// Emit a leaf store of a single value. This is called at the leaves of the 2901 /// recursive emission to actually produce stores. 2902 void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) { 2903 assert(Ty->isSingleValueType()); 2904 // Extract the single value and store it using the indices. 2905 Value *Store = IRB.CreateStore( 2906 IRB.CreateExtractValue(Agg, Indices, Name + ".extract"), 2907 IRB.CreateInBoundsGEP(Ptr, GEPIndices, Name + ".gep")); 2908 (void)Store; 2909 DEBUG(dbgs() << " to: " << *Store << "\n"); 2910 } 2911 }; 2912 2913 bool visitStoreInst(StoreInst &SI) { 2914 if (!SI.isSimple() || SI.getPointerOperand() != *U) 2915 return false; 2916 Value *V = SI.getValueOperand(); 2917 if (V->getType()->isSingleValueType()) 2918 return false; 2919 2920 // We have an aggregate being stored, split it apart. 2921 DEBUG(dbgs() << " original: " << SI << "\n"); 2922 StoreOpSplitter Splitter(&SI, *U); 2923 Splitter.emitSplitOps(V->getType(), V, V->getName() + ".fca"); 2924 SI.eraseFromParent(); 2925 return true; 2926 } 2927 2928 bool visitBitCastInst(BitCastInst &BC) { 2929 enqueueUsers(BC); 2930 return false; 2931 } 2932 2933 bool visitGetElementPtrInst(GetElementPtrInst &GEPI) { 2934 enqueueUsers(GEPI); 2935 return false; 2936 } 2937 2938 bool visitPHINode(PHINode &PN) { 2939 enqueueUsers(PN); 2940 return false; 2941 } 2942 2943 bool visitSelectInst(SelectInst &SI) { 2944 enqueueUsers(SI); 2945 return false; 2946 } 2947}; 2948} 2949 2950/// \brief Strip aggregate type wrapping. 2951/// 2952/// This removes no-op aggregate types wrapping an underlying type. It will 2953/// strip as many layers of types as it can without changing either the type 2954/// size or the allocated size. 
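/// For illustration (added commentary, not part of the original source):
/// a type such as { { float } } strips all the way down to float, while
/// { float, i8 } is returned unchanged because the inner float does not
/// cover the struct's full allocated size.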
2955 static Type *stripAggregateTypeWrapping(const DataLayout &DL, Type *Ty) { 2956 if (Ty->isSingleValueType()) 2957 return Ty; 2958 2959 uint64_t AllocSize = DL.getTypeAllocSize(Ty); 2960 uint64_t TypeSize = DL.getTypeSizeInBits(Ty); 2961 2962 Type *InnerTy; 2963 if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) { 2964 InnerTy = ArrTy->getElementType(); 2965 } else if (StructType *STy = dyn_cast<StructType>(Ty)) { 2966 const StructLayout *SL = DL.getStructLayout(STy); 2967 unsigned Index = SL->getElementContainingOffset(0); 2968 InnerTy = STy->getElementType(Index); 2969 } else { 2970 return Ty; 2971 } 2972 2973 if (AllocSize > DL.getTypeAllocSize(InnerTy) || 2974 TypeSize > DL.getTypeSizeInBits(InnerTy)) 2975 return Ty; 2976 2977 return stripAggregateTypeWrapping(DL, InnerTy); 2978} 2979 2980/// \brief Try to find a partition of the aggregate type passed in for a given 2981/// offset and size. 2982/// 2983/// This recurses through the aggregate type and tries to compute a subtype 2984/// based on the offset and size. When the offset and size span a sub-section 2985/// of an array, it will even compute a new array type for that sub-section, 2986/// and the same for structs. 2987/// 2988/// Note that this routine is very strict and tries to find a partition of the 2989/// type which produces the *exact* right offset and size. It is not forgiving 2990/// when the size or offset causes either end of the type-based partition to be off. 2991/// Also, this is a best-effort routine. It is reasonable to give up and not 2992/// return a type if necessary. 2993static Type *getTypePartition(const DataLayout &DL, Type *Ty, 2994 uint64_t Offset, uint64_t Size) { 2995 if (Offset == 0 && DL.getTypeAllocSize(Ty) == Size) 2996 return stripAggregateTypeWrapping(DL, Ty); 2997 if (Offset > DL.getTypeAllocSize(Ty) || 2998 (DL.getTypeAllocSize(Ty) - Offset) < Size) 2999 return nullptr; 3000 3001 if (SequentialType *SeqTy = dyn_cast<SequentialType>(Ty)) { 3002 // We can't partition pointers... 3003 if (SeqTy->isPointerTy()) 3004 return nullptr; 3005 3006 Type *ElementTy = SeqTy->getElementType(); 3007 uint64_t ElementSize = DL.getTypeAllocSize(ElementTy); 3008 uint64_t NumSkippedElements = Offset / ElementSize; 3009 if (ArrayType *ArrTy = dyn_cast<ArrayType>(SeqTy)) { 3010 if (NumSkippedElements >= ArrTy->getNumElements()) 3011 return nullptr; 3012 } else if (VectorType *VecTy = dyn_cast<VectorType>(SeqTy)) { 3013 if (NumSkippedElements >= VecTy->getNumElements()) 3014 return nullptr; 3015 } 3016 Offset -= NumSkippedElements * ElementSize; 3017 3018 // First check if we need to recurse. 3019 if (Offset > 0 || Size < ElementSize) { 3020 // Bail if the partition ends in a different array element. 3021 if ((Offset + Size) > ElementSize) 3022 return nullptr; 3023 // Recurse through the element type trying to peel off offset bytes.
      return getTypePartition(DL, ElementTy, Offset, Size);
    }
    assert(Offset == 0);

    if (Size == ElementSize)
      return stripAggregateTypeWrapping(DL, ElementTy);
    assert(Size > ElementSize);
    uint64_t NumElements = Size / ElementSize;
    if (NumElements * ElementSize != Size)
      return nullptr;
    return ArrayType::get(ElementTy, NumElements);
  }

  StructType *STy = dyn_cast<StructType>(Ty);
  if (!STy)
    return nullptr;

  const StructLayout *SL = DL.getStructLayout(STy);
  if (Offset >= SL->getSizeInBytes())
    return nullptr;
  uint64_t EndOffset = Offset + Size;
  if (EndOffset > SL->getSizeInBytes())
    return nullptr;

  unsigned Index = SL->getElementContainingOffset(Offset);
  Offset -= SL->getElementOffset(Index);

  Type *ElementTy = STy->getElementType(Index);
  uint64_t ElementSize = DL.getTypeAllocSize(ElementTy);
  if (Offset >= ElementSize)
    return nullptr; // The offset points into alignment padding.

  // See if any partition must be contained by the element.
  if (Offset > 0 || Size < ElementSize) {
    if ((Offset + Size) > ElementSize)
      return nullptr;
    return getTypePartition(DL, ElementTy, Offset, Size);
  }
  assert(Offset == 0);

  if (Size == ElementSize)
    return stripAggregateTypeWrapping(DL, ElementTy);

  StructType::element_iterator EI = STy->element_begin() + Index,
                               EE = STy->element_end();
  if (EndOffset < SL->getSizeInBytes()) {
    unsigned EndIndex = SL->getElementContainingOffset(EndOffset);
    if (Index == EndIndex)
      return nullptr; // Within a single element and its padding.

    // Don't try to form "natural" types if the elements don't line up with the
    // expected size.
    // FIXME: We could potentially recurse down through the last element in the
    // sub-struct to find a natural end point.
    if (SL->getElementOffset(EndIndex) != EndOffset)
      return nullptr;

    assert(Index < EndIndex);
    EE = STy->element_begin() + EndIndex;
  }

  // Try to build up a sub-structure.
  StructType *SubTy = StructType::get(STy->getContext(), makeArrayRef(EI, EE),
                                      STy->isPacked());
  const StructLayout *SubSL = DL.getStructLayout(SubTy);
  if (Size != SubSL->getSizeInBytes())
    return nullptr; // The sub-struct doesn't have quite the size needed.

  return SubTy;
}

/// \brief Rewrite an alloca partition's users.
///
/// This routine drives both of the rewriting goals of the SROA pass. It tries
/// to rewrite uses of an alloca partition to be conducive to SSA value
/// promotion. If the partition needs a new, more refined alloca, this will
/// build that new alloca, preserving as much type information as possible, and
/// rewrite the uses of the old alloca to point at the new one with the
/// appropriate new offsets. It also evaluates how successful the rewrite was
/// at enabling promotion and, if it was successful, queues the alloca to be
/// promoted.
bool SROA::rewritePartition(AllocaInst &AI, AllocaSlices &S,
                            AllocaSlices::iterator B, AllocaSlices::iterator E,
                            int64_t BeginOffset, int64_t EndOffset,
                            ArrayRef<AllocaSlices::iterator> SplitUses) {
  assert(BeginOffset < EndOffset);
  uint64_t SliceSize = EndOffset - BeginOffset;

  // Try to compute a friendly type for this partition of the alloca. This
  // won't always succeed, in which case we fall back to a legal integer type
  // or an i8 array of an appropriate size.
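  // As an illustrative sketch of that fallback chain (sizes invented for
  // exposition): a 6-byte partition with no common use type and no matching
  // subtype of the alloca becomes an i48 if the target considers i48 legal,
  // and a [6 x i8] array otherwise.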
  Type *SliceTy = nullptr;
  if (Type *CommonUseTy = findCommonType(B, E, EndOffset))
    if (DL->getTypeAllocSize(CommonUseTy) >= SliceSize)
      SliceTy = CommonUseTy;
  if (!SliceTy)
    if (Type *TypePartitionTy = getTypePartition(*DL, AI.getAllocatedType(),
                                                 BeginOffset, SliceSize))
      SliceTy = TypePartitionTy;
  if ((!SliceTy || (SliceTy->isArrayTy() &&
                    SliceTy->getArrayElementType()->isIntegerTy())) &&
      DL->isLegalInteger(SliceSize * 8))
    SliceTy = Type::getIntNTy(*C, SliceSize * 8);
  if (!SliceTy)
    SliceTy = ArrayType::get(Type::getInt8Ty(*C), SliceSize);
  assert(DL->getTypeAllocSize(SliceTy) >= SliceSize);

  bool IsVectorPromotable = isVectorPromotionViable(
      *DL, SliceTy, S, BeginOffset, EndOffset, B, E, SplitUses);

  bool IsIntegerPromotable =
      !IsVectorPromotable &&
      isIntegerWideningViable(*DL, SliceTy, BeginOffset, S, B, E, SplitUses);

  // Check for the case where we're going to rewrite to a new alloca of the
  // exact same type as the original, and with the same access offsets. In that
  // case, re-use the existing alloca, but still run through the rewriter to
  // perform phi and select speculation.
  AllocaInst *NewAI;
  if (SliceTy == AI.getAllocatedType()) {
    assert(BeginOffset == 0 &&
           "Non-zero begin offset but same alloca type");
    NewAI = &AI;
    // FIXME: We should be able to bail at this point with "nothing changed".
    // FIXME: We might want to defer PHI speculation until after here.
  } else {
    unsigned Alignment = AI.getAlignment();
    if (!Alignment) {
      // The minimum alignment which users can rely on when the explicit
      // alignment is omitted or zero is that required by the ABI for this
      // type.
      Alignment = DL->getABITypeAlignment(AI.getAllocatedType());
    }
    Alignment = MinAlign(Alignment, BeginOffset);
    // If we will get at least this much alignment from the type alone, leave
    // the alloca's alignment unconstrained.
    if (Alignment <= DL->getABITypeAlignment(SliceTy))
      Alignment = 0;
    NewAI = new AllocaInst(SliceTy, nullptr, Alignment,
                           AI.getName() + ".sroa." + Twine(B - S.begin()), &AI);
    ++NumNewAllocas;
  }

  DEBUG(dbgs() << "Rewriting alloca partition "
               << "[" << BeginOffset << "," << EndOffset << ") to: " << *NewAI
               << "\n");
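  // For a concrete illustration of the rewrite (an invented example, not
  // taken from the original comments): a 16-byte alloca of { i64, i64 }
  // whose two fields are only ever accessed independently is split into two
  // 8-byte partitions, each rewritten here onto its own "<name>.sroa.N"
  // alloca of type i64, which mem2reg can then promote separately.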
  // Track the high watermark on the worklist as it is only relevant for
  // promoted allocas. We will reset it to this point if the alloca is not in
  // fact scheduled for promotion.
  unsigned PPWOldSize = PostPromotionWorklist.size();
  unsigned NumUses = 0;
  SmallPtrSet<PHINode *, 8> PHIUsers;
  SmallPtrSet<SelectInst *, 8> SelectUsers;

  AllocaSliceRewriter Rewriter(*DL, S, *this, AI, *NewAI, BeginOffset,
                               EndOffset, IsVectorPromotable,
                               IsIntegerPromotable, PHIUsers, SelectUsers);
  bool Promotable = true;
  for (ArrayRef<AllocaSlices::iterator>::const_iterator SUI = SplitUses.begin(),
                                                        SUE = SplitUses.end();
       SUI != SUE; ++SUI) {
    DEBUG(dbgs() << "  rewriting split ");
    DEBUG(S.printSlice(dbgs(), *SUI, ""));
    Promotable &= Rewriter.visit(*SUI);
    ++NumUses;
  }
  for (AllocaSlices::iterator I = B; I != E; ++I) {
    DEBUG(dbgs() << "  rewriting ");
    DEBUG(S.printSlice(dbgs(), I, ""));
    Promotable &= Rewriter.visit(I);
    ++NumUses;
  }

  NumAllocaPartitionUses += NumUses;
  MaxUsesPerAllocaPartition =
      std::max<unsigned>(NumUses, MaxUsesPerAllocaPartition);

  // Now that we've processed all the slices in the new partition, check if any
  // PHIs or Selects would block promotion.
  for (SmallPtrSetImpl<PHINode *>::iterator I = PHIUsers.begin(),
                                            E = PHIUsers.end();
       I != E; ++I)
    if (!isSafePHIToSpeculate(**I, DL)) {
      Promotable = false;
      PHIUsers.clear();
      SelectUsers.clear();
      break;
    }
  for (SmallPtrSetImpl<SelectInst *>::iterator I = SelectUsers.begin(),
                                               E = SelectUsers.end();
       I != E; ++I)
    if (!isSafeSelectToSpeculate(**I, DL)) {
      Promotable = false;
      PHIUsers.clear();
      SelectUsers.clear();
      break;
    }

  if (Promotable) {
    if (PHIUsers.empty() && SelectUsers.empty()) {
      // Promote the alloca.
      PromotableAllocas.push_back(NewAI);
    } else {
      // If we have either PHIs or Selects to speculate, add them to those
      // worklists and re-queue the new alloca so that we promote it on the
      // next iteration.
      for (SmallPtrSetImpl<PHINode *>::iterator I = PHIUsers.begin(),
                                                E = PHIUsers.end();
           I != E; ++I)
        SpeculatablePHIs.insert(*I);
      for (SmallPtrSetImpl<SelectInst *>::iterator I = SelectUsers.begin(),
                                                   E = SelectUsers.end();
           I != E; ++I)
        SpeculatableSelects.insert(*I);
      Worklist.insert(NewAI);
    }
  } else {
    // If we can't promote the alloca, iterate on it to check for new
    // refinements exposed by splitting the current alloca. Don't iterate on an
    // alloca which didn't actually change and didn't get promoted.
    if (NewAI != &AI)
      Worklist.insert(NewAI);

    // Drop any post-promotion work items if promotion didn't happen.
    while (PostPromotionWorklist.size() > PPWOldSize)
      PostPromotionWorklist.pop_back();
  }

  return true;
}

static void
removeFinishedSplitUses(SmallVectorImpl<AllocaSlices::iterator> &SplitUses,
                        uint64_t &MaxSplitUseEndOffset, uint64_t Offset) {
  if (Offset >= MaxSplitUseEndOffset) {
    SplitUses.clear();
    MaxSplitUseEndOffset = 0;
    return;
  }

  size_t SplitUsesOldSize = SplitUses.size();
  SplitUses.erase(std::remove_if(SplitUses.begin(), SplitUses.end(),
                                 [Offset](const AllocaSlices::iterator &I) {
                                   return I->endOffset() <= Offset;
                                 }),
                  SplitUses.end());
  if (SplitUsesOldSize == SplitUses.size())
    return;
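  // (An illustrative trace, with offsets invented for exposition: given
  // split uses ending at 8, 16, and 32 and Offset == 16, the first two are
  // erased above and the maximum end offset is recomputed below as 32.)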
  // Recompute the max. While this is linear, so is remove_if.
  MaxSplitUseEndOffset = 0;
  for (SmallVectorImpl<AllocaSlices::iterator>::iterator
           SUI = SplitUses.begin(),
           SUE = SplitUses.end();
       SUI != SUE; ++SUI)
    MaxSplitUseEndOffset = std::max((*SUI)->endOffset(), MaxSplitUseEndOffset);
}

/// \brief Walks the slices of an alloca and forms partitions based on them,
/// rewriting each of their uses.
bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &S) {
  if (S.begin() == S.end())
    return false;

  unsigned NumPartitions = 0;
  bool Changed = false;
  SmallVector<AllocaSlices::iterator, 4> SplitUses;
  uint64_t MaxSplitUseEndOffset = 0;

  uint64_t BeginOffset = S.begin()->beginOffset();

  for (AllocaSlices::iterator SI = S.begin(), SJ = std::next(SI), SE = S.end();
       SI != SE; SI = SJ) {
    uint64_t MaxEndOffset = SI->endOffset();

    if (!SI->isSplittable()) {
      // When we're forming an unsplittable region, it must always start at the
      // first slice and will extend through its end.
      assert(BeginOffset == SI->beginOffset());

      // Form a partition including all of the overlapping slices with this
      // unsplittable slice.
      while (SJ != SE && SJ->beginOffset() < MaxEndOffset) {
        if (!SJ->isSplittable())
          MaxEndOffset = std::max(MaxEndOffset, SJ->endOffset());
        ++SJ;
      }
    } else {
      assert(SI->isSplittable()); // Established above.

      // Collect all of the overlapping splittable slices.
      while (SJ != SE && SJ->beginOffset() < MaxEndOffset &&
             SJ->isSplittable()) {
        MaxEndOffset = std::max(MaxEndOffset, SJ->endOffset());
        ++SJ;
      }

      // Back up MaxEndOffset and SJ if we ended the span early when
      // encountering an unsplittable slice.
      if (SJ != SE && SJ->beginOffset() < MaxEndOffset) {
        assert(!SJ->isSplittable());
        MaxEndOffset = SJ->beginOffset();
      }
    }

    // Check if we have managed to move the end offset forward yet. If so,
    // we'll have to rewrite uses and erase old split uses.
    if (BeginOffset < MaxEndOffset) {
      // Rewrite a sequence of overlapping slices.
      Changed |=
          rewritePartition(AI, S, SI, SJ, BeginOffset, MaxEndOffset, SplitUses);
      ++NumPartitions;

      removeFinishedSplitUses(SplitUses, MaxSplitUseEndOffset, MaxEndOffset);
    }

    // Accumulate all the splittable slices from the [SI,SJ) region which
    // overlap going forward.
    for (AllocaSlices::iterator SK = SI; SK != SJ; ++SK)
      if (SK->isSplittable() && SK->endOffset() > MaxEndOffset) {
        SplitUses.push_back(SK);
        MaxSplitUseEndOffset = std::max(SK->endOffset(), MaxSplitUseEndOffset);
      }

    // If we're already at the end and we have no split uses, we're done.
    if (SJ == SE && SplitUses.empty())
      break;

    // If we have no split uses or no gap in offsets, we're ready to move to
    // the next slice.
    if (SplitUses.empty() || (SJ != SE && MaxEndOffset == SJ->beginOffset())) {
      BeginOffset = SJ->beginOffset();
      continue;
    }

    // Even if we have split slices, if the next slice is splittable and the
    // split slices reach it, we can simply set up the beginning offset of the
    // next iteration to bridge between them.
    if (SJ != SE && SJ->isSplittable() &&
        MaxSplitUseEndOffset > SJ->beginOffset()) {
      BeginOffset = MaxEndOffset;
      continue;
    }

    // Otherwise, we have a tail of split slices. Rewrite them with an empty
    // range of slices.
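    // (Illustrative example, invented for exposition: a splittable memset
    // slice covering [0,16) alongside an unsplittable load at [0,8) forms a
    // partition over [0,8); the memset's remainder then survives as a split
    // use and is rewritten here against the tail range [8,16) with no slices
    // of its own.)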
    uint64_t PostSplitEndOffset =
        SJ == SE ? MaxSplitUseEndOffset : SJ->beginOffset();

    Changed |= rewritePartition(AI, S, SJ, SJ, MaxEndOffset, PostSplitEndOffset,
                                SplitUses);
    ++NumPartitions;

    if (SJ == SE)
      break; // Skip the rest, we don't need to do any cleanup.

    removeFinishedSplitUses(SplitUses, MaxSplitUseEndOffset,
                            PostSplitEndOffset);

    // Now just reset the begin offset for the next iteration.
    BeginOffset = SJ->beginOffset();
  }

  NumAllocaPartitions += NumPartitions;
  MaxPartitionsPerAlloca =
      std::max<unsigned>(NumPartitions, MaxPartitionsPerAlloca);

  return Changed;
}

/// \brief Clobber a use with undef, deleting the used value if it becomes dead.
void SROA::clobberUse(Use &U) {
  Value *OldV = U;
  // Replace the use with an undef value.
  U = UndefValue::get(OldV->getType());

  // Check for this making an instruction dead. We have to garbage collect all
  // the dead instructions to ensure the uses of any alloca end up being
  // minimal.
  if (Instruction *OldI = dyn_cast<Instruction>(OldV))
    if (isInstructionTriviallyDead(OldI))
      DeadInsts.insert(OldI);
}

/// \brief Analyze an alloca for SROA.
///
/// This analyzes the alloca to ensure we can reason about it, builds the
/// slices of the alloca, and then hands it off to be split and rewritten as
/// needed.
bool SROA::runOnAlloca(AllocaInst &AI) {
  DEBUG(dbgs() << "SROA alloca: " << AI << "\n");
  ++NumAllocasAnalyzed;

  // Special case dead allocas, as they're trivial.
  if (AI.use_empty()) {
    AI.eraseFromParent();
    return true;
  }

  // Skip alloca forms that this analysis can't handle.
  if (AI.isArrayAllocation() || !AI.getAllocatedType()->isSized() ||
      DL->getTypeAllocSize(AI.getAllocatedType()) == 0)
    return false;

  bool Changed = false;

  // First, split any FCA loads and stores touching this alloca to promote
  // better splitting and promotion opportunities.
  AggLoadStoreRewriter AggRewriter(*DL);
  Changed |= AggRewriter.rewrite(AI);

  // Build the slices using a recursive instruction-visiting builder.
  AllocaSlices S(*DL, AI);
  DEBUG(S.print(dbgs()));
  if (S.isEscaped())
    return Changed;

  // Delete all the dead users of this alloca before splitting and rewriting it.
  for (AllocaSlices::dead_user_iterator DI = S.dead_user_begin(),
                                        DE = S.dead_user_end();
       DI != DE; ++DI) {
    // Free up everything used by this instruction.
    for (Use &DeadOp : (*DI)->operands())
      clobberUse(DeadOp);

    // Now replace the uses of this instruction.
    (*DI)->replaceAllUsesWith(UndefValue::get((*DI)->getType()));

    // And mark it for deletion.
    DeadInsts.insert(*DI);
    Changed = true;
  }
  for (AllocaSlices::dead_op_iterator DO = S.dead_op_begin(),
                                      DE = S.dead_op_end();
       DO != DE; ++DO) {
    clobberUse(**DO);
    Changed = true;
  }
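  // (Illustrative note: clobbering an operand with undef can cascade. If a
  // dead user's operand was a GEP with no other uses, the GEP itself becomes
  // trivially dead inside clobberUse and is queued in DeadInsts as well.)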
  // No slices to split. Leave the dead alloca for a later pass to clean up.
  if (S.begin() == S.end())
    return Changed;

  Changed |= splitAlloca(AI, S);

  DEBUG(dbgs() << "  Speculating PHIs\n");
  while (!SpeculatablePHIs.empty())
    speculatePHINodeLoads(*SpeculatablePHIs.pop_back_val());

  DEBUG(dbgs() << "  Speculating Selects\n");
  while (!SpeculatableSelects.empty())
    speculateSelectInstLoads(*SpeculatableSelects.pop_back_val());

  return Changed;
}

/// \brief Delete the dead instructions accumulated in this run.
///
/// Recursively deletes the dead instructions we've accumulated. This is done
/// at the very end to maximize locality of the recursive delete and to
/// minimize the problems of invalidated instruction pointers as such pointers
/// are used heavily in the intermediate stages of the algorithm.
///
/// We also record the alloca instructions deleted here so that they aren't
/// subsequently handed to mem2reg to promote.
void SROA::deleteDeadInstructions(SmallPtrSet<AllocaInst *, 4> &DeletedAllocas) {
  while (!DeadInsts.empty()) {
    Instruction *I = DeadInsts.pop_back_val();
    DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n");

    I->replaceAllUsesWith(UndefValue::get(I->getType()));

    for (Use &Operand : I->operands())
      if (Instruction *U = dyn_cast<Instruction>(Operand)) {
        // Zero out the operand and see if it becomes trivially dead.
        Operand = nullptr;
        if (isInstructionTriviallyDead(U))
          DeadInsts.insert(U);
      }

    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      DeletedAllocas.insert(AI);

    ++NumDeleted;
    I->eraseFromParent();
  }
}

static void enqueueUsersInWorklist(Instruction &I,
                                   SmallVectorImpl<Instruction *> &Worklist,
                                   SmallPtrSet<Instruction *, 8> &Visited) {
  for (User *U : I.users())
    if (Visited.insert(cast<Instruction>(U)))
      Worklist.push_back(cast<Instruction>(U));
}

/// \brief Promote the allocas, using the best available technique.
///
/// This attempts to promote whatever allocas have been identified as viable in
/// the PromotableAllocas list. If that list is empty, there is nothing to do.
/// If there is a domtree available, we attempt to promote using the full power
/// of mem2reg. Otherwise, we build and use the AllocaPromoter above which is
/// based on the SSAUpdater utilities. This function returns whether any
/// promotion occurred.
bool SROA::promoteAllocas(Function &F) {
  if (PromotableAllocas.empty())
    return false;

  NumPromoted += PromotableAllocas.size();

  if (DT && !ForceSSAUpdater) {
    DEBUG(dbgs() << "Promoting allocas with mem2reg...\n");
    PromoteMemToReg(PromotableAllocas, *DT);
    PromotableAllocas.clear();
    return true;
  }

  DEBUG(dbgs() << "Promoting allocas with SSAUpdater...\n");
  SSAUpdater SSA;
  DIBuilder DIB(*F.getParent());
  SmallVector<Instruction *, 64> Insts;
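  // (Illustrative walk, invented for exposition: for an alloca whose users
  // are a bitcast feeding lifetime intrinsics plus direct loads and stores,
  // the loop below erases the intrinsics, records the bitcast for later
  // removal, and hands only the loads and stores to the AllocaPromoter.)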
  // We need a worklist to walk the uses of each alloca.
  SmallVector<Instruction *, 8> Worklist;
  SmallPtrSet<Instruction *, 8> Visited;
  SmallVector<Instruction *, 32> DeadInsts;

  for (unsigned Idx = 0, Size = PromotableAllocas.size(); Idx != Size; ++Idx) {
    AllocaInst *AI = PromotableAllocas[Idx];
    Insts.clear();
    Worklist.clear();
    Visited.clear();

    enqueueUsersInWorklist(*AI, Worklist, Visited);

    while (!Worklist.empty()) {
      Instruction *I = Worklist.pop_back_val();

      // FIXME: Currently the SSAUpdater infrastructure doesn't reason about
      // lifetime intrinsics and so we strip them (and the bitcasts+GEPs
      // leading to them) here. Eventually it should use them to optimize the
      // scalar values produced.
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
        assert(II->getIntrinsicID() == Intrinsic::lifetime_start ||
               II->getIntrinsicID() == Intrinsic::lifetime_end);
        II->eraseFromParent();
        continue;
      }

      // Push the loads and stores we find onto the list. SROA will already
      // have validated that all loads and stores are viable candidates for
      // promotion.
      if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
        assert(LI->getType() == AI->getAllocatedType());
        Insts.push_back(LI);
        continue;
      }
      if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
        assert(SI->getValueOperand()->getType() == AI->getAllocatedType());
        Insts.push_back(SI);
        continue;
      }

      // For everything else, we know that only no-op bitcasts and GEPs will
      // make it this far; just recurse through them and record them for later
      // removal.
      DeadInsts.push_back(I);
      enqueueUsersInWorklist(*I, Worklist, Visited);
    }
    AllocaPromoter(Insts, SSA, *AI, DIB).run(Insts);
    while (!DeadInsts.empty())
      DeadInsts.pop_back_val()->eraseFromParent();
    AI->eraseFromParent();
  }

  PromotableAllocas.clear();
  return true;
}

bool SROA::runOnFunction(Function &F) {
  if (skipOptnoneFunction(F))
    return false;

  DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
  C = &F.getContext();
  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
  if (!DLP) {
    DEBUG(dbgs() << "  Skipping SROA -- no target data!\n");
    return false;
  }
  DL = &DLP->getDataLayout();
  DominatorTreeWrapperPass *DTWP =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DT = DTWP ? &DTWP->getDomTree() : nullptr;

  BasicBlock &EntryBB = F.getEntryBlock();
  for (BasicBlock::iterator I = EntryBB.begin(), E = std::prev(EntryBB.end());
       I != E; ++I)
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      Worklist.insert(AI);

  bool Changed = false;
  // A set of deleted alloca instruction pointers which should be removed from
  // the list of promotable allocas.
  SmallPtrSet<AllocaInst *, 4> DeletedAllocas;

  do {
    while (!Worklist.empty()) {
      Changed |= runOnAlloca(*Worklist.pop_back_val());
      deleteDeadInstructions(DeletedAllocas);
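      // (Illustrative iteration, invented for exposition: splitting an
      // aggregate alloca on one pass can queue its new sub-allocas on
      // Worklist; once those are promoted, PostPromotionWorklist seeds the
      // next round of the outer do-while until no further work is exposed.)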
      // Remove the deleted allocas from various lists so that we don't try to
      // continue processing them.
      if (!DeletedAllocas.empty()) {
        auto IsInSet = [&](AllocaInst *AI) { return DeletedAllocas.count(AI); };
        Worklist.remove_if(IsInSet);
        PostPromotionWorklist.remove_if(IsInSet);
        PromotableAllocas.erase(std::remove_if(PromotableAllocas.begin(),
                                               PromotableAllocas.end(),
                                               IsInSet),
                                PromotableAllocas.end());
        DeletedAllocas.clear();
      }
    }

    Changed |= promoteAllocas(F);

    Worklist = PostPromotionWorklist;
    PostPromotionWorklist.clear();
  } while (!Worklist.empty());

  return Changed;
}

void SROA::getAnalysisUsage(AnalysisUsage &AU) const {
  if (RequiresDomTree)
    AU.addRequired<DominatorTreeWrapperPass>();
  AU.setPreservesCFG();
}