ScalarReplAggregates.cpp revision 593375d04ab32be0161607a741d310172f142b93
//===- ScalarReplAggregates.cpp - Scalar Replacement of Aggregates --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation implements the well known scalar replacement of
// aggregates transformation.  This xform breaks up alloca instructions of
// aggregate type (structure or array) into individual alloca instructions for
// each member (if possible).  Then, if possible, it transforms the individual
// alloca instructions into nice clean scalar SSA form.
//
// This combines a simple SRoA algorithm with the Mem2Reg algorithm because
// the two often interact, especially for C++ programs.  As such, iterating
// between SRoA and Mem2Reg until we run out of things to promote works well.
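//
// For example (an illustrative sketch): given an alloca of a two-element
// struct whose fields are only accessed individually,
//
//   %p = alloca { i32, i32 }
//   %f0 = getelementptr { i32, i32 }* %p, i32 0, i32 0
//   store i32 1, i32* %f0
//
// SRoA splits %p into scalar allocas %p.0 and %p.1, and Mem2Reg can then
// promote each of them to SSA registers.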
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "scalarrepl"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

STATISTIC(NumReplaced,  "Number of allocas broken up");
STATISTIC(NumPromoted,  "Number of allocas promoted");
STATISTIC(NumConverted, "Number of aggregates converted to scalar");
STATISTIC(NumGlobals,   "Number of allocas copied from constant global");

namespace {
  struct ConvertToScalarInfo;

  struct SROA : public FunctionPass {
    static char ID; // Pass identification, replacement for typeid
    explicit SROA(signed T = -1) : FunctionPass(&ID) {
      if (T == -1)
        SRThreshold = 128;
      else
        SRThreshold = T;
    }

    bool runOnFunction(Function &F);

    bool performScalarRepl(Function &F);
    bool performPromotion(Function &F);

    // getAnalysisUsage - This pass requires dominator information for mem2reg,
    // and we know it will not alter the CFG, so say so.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      AU.addRequired<DominanceFrontier>();
      AU.setPreservesCFG();
    }

  private:
    TargetData *TD;

    /// DeadInsts - Keep track of instructions we have made dead, so that
    /// we can remove them after we are done working.
    SmallVector<Value*, 32> DeadInsts;

    /// AllocaInfo - When analyzing uses of an alloca instruction, this captures
    /// information about the uses.  All these fields are initialized to false
    /// and set to true when something is learned.
    struct AllocaInfo {
      /// isUnsafe - This is set to true if the alloca cannot be SROA'd.
      bool isUnsafe : 1;

      /// isMemCpySrc - This is true if this aggregate is memcpy'd from.
      bool isMemCpySrc : 1;

      /// isMemCpyDst - This is true if this aggregate is memcpy'd into.
      bool isMemCpyDst : 1;

      AllocaInfo()
        : isUnsafe(false), isMemCpySrc(false), isMemCpyDst(false) {}
    };

    unsigned SRThreshold;

    void MarkUnsafe(AllocaInfo &I) { I.isUnsafe = true; }

    bool isSafeAllocaToScalarRepl(AllocaInst *AI);

    void isSafeForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
                             AllocaInfo &Info);
    void isSafeGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t &Offset,
                   AllocaInfo &Info);
    void isSafeMemAccess(AllocaInst *AI, uint64_t Offset, uint64_t MemSize,
                         const Type *MemOpType, bool isStore, AllocaInfo &Info);
    bool TypeHasComponent(const Type *T, uint64_t Offset, uint64_t Size);
    uint64_t FindElementAndOffset(const Type *&T, uint64_t &Offset,
                                  const Type *&IdxTy);

    void DoScalarReplacement(AllocaInst *AI,
                             std::vector<AllocaInst*> &WorkList);
    void DeleteDeadInstructions();
    AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocaInst *Base);

    void RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
                              SmallVector<AllocaInst*, 32> &NewElts);
    void RewriteBitCast(BitCastInst *BC, AllocaInst *AI, uint64_t Offset,
                        SmallVector<AllocaInst*, 32> &NewElts);
    void RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset,
                    SmallVector<AllocaInst*, 32> &NewElts);
    void RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
                                      AllocaInst *AI,
                                      SmallVector<AllocaInst*, 32> &NewElts);
    void RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
                                       SmallVector<AllocaInst*, 32> &NewElts);
    void RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
                                      SmallVector<AllocaInst*, 32> &NewElts);

    static MemTransferInst *isOnlyCopiedFromConstantGlobal(AllocaInst *AI);
  };
}

char SROA::ID = 0;
static RegisterPass<SROA> X("scalarrepl", "Scalar Replacement of Aggregates");

// Public interface to the ScalarReplAggregates pass
FunctionPass *llvm::createScalarReplAggregatesPass(signed int Threshold) {
  return new SROA(Threshold);
}


bool SROA::runOnFunction(Function &F) {
  TD = getAnalysisIfAvailable<TargetData>();

  bool Changed = performPromotion(F);

  // FIXME: ScalarRepl currently depends on TargetData more than it
  // theoretically needs to. It should be refactored in order to support
  // target-independent IR. Until this is done, just skip the actual
  // scalar-replacement portion of this pass.
  if (!TD) return Changed;

  while (1) {
    bool LocalChange = performScalarRepl(F);
    if (!LocalChange) break;   // No need to repromote if no scalarrepl
    Changed = true;
    LocalChange = performPromotion(F);
    if (!LocalChange) break;   // No need to re-scalarrepl if no promotion
  }

  return Changed;
}


bool SROA::performPromotion(Function &F) {
  std::vector<AllocaInst*> Allocas;
  DominatorTree         &DT = getAnalysis<DominatorTree>();
  DominanceFrontier &DF = getAnalysis<DominanceFrontier>();

  BasicBlock &BB = F.getEntryBlock();  // Get the entry node for the function

  bool Changed = false;

  while (1) {
    Allocas.clear();

    // Find allocas that are safe to promote, by looking at all instructions in
    // the entry node
    for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(I))       // Is it an alloca?
        if (isAllocaPromotable(AI))
          Allocas.push_back(AI);

    if (Allocas.empty()) break;

    PromoteMemToReg(Allocas, DT, DF);
    NumPromoted += Allocas.size();
    Changed = true;
  }

  return Changed;
}

/// ShouldAttemptScalarRepl - Decide if an alloca is a good candidate for
/// SROA.  It must be a struct or array type with a small number of elements.
static bool ShouldAttemptScalarRepl(AllocaInst *AI) {
  const Type *T = AI->getAllocatedType();
  // Do not promote any struct into more than 32 separate vars.
  if (const StructType *ST = dyn_cast<StructType>(T))
    return ST->getNumElements() <= 32;
  // Arrays are much less likely to be safe for SROA; only consider
  // them if they are very small.
  if (const ArrayType *AT = dyn_cast<ArrayType>(T))
    return AT->getNumElements() <= 8;
  return false;
}

namespace {
/// ConvertToScalarInfo - This struct is used by CanConvertToScalar to track
/// the state of the analysis that decides whether an alloca can be converted
/// into a single scalar or vector value.
struct ConvertToScalarInfo {
  /// AllocaSize - The size of the alloca being considered.
  unsigned AllocaSize;
  const TargetData &TD;

  /// IsNotTrivial - This is set if the conversion involves anything that
  /// mem2reg could not have promoted on its own.
  bool IsNotTrivial;

  /// VectorTy - The vector type that can replace the alloca, null if none has
  /// been found yet, or VoidTy if the uses rule out a vector.
  const Type *VectorTy;

  /// HadAVector - True if at least one of the accesses used a vector type.
  bool HadAVector;

  explicit ConvertToScalarInfo(unsigned Size, const TargetData &td)
    : AllocaSize(Size), TD(td) {
    IsNotTrivial = false;
    VectorTy = 0;
    HadAVector = false;
  }

  bool shouldConvertToVector() const {
    return VectorTy && VectorTy->isVectorTy() && HadAVector;
  }

  AllocaInst *TryConvert(AllocaInst *AI) {
    // If we can't convert this scalar, or if mem2reg can trivially do it, bail
    // out.
    if (!CanConvertToScalar(AI, 0) || !IsNotTrivial)
      // FIXME: In the trivial case, just use mem2reg.
      return 0;

    // If we were able to find a vector type that can handle this with
    // insert/extract elements, and if there was at least one use that had
    // a vector type, promote this to a vector.  We don't want to promote
    // random stuff that doesn't use vectors (e.g. <9 x double>) because then
    // we just get a lot of insert/extracts.  If at least one vector is
    // involved, then we probably really do have a union of vector/array.
    const Type *NewTy;
    if (shouldConvertToVector()) {
      DEBUG(dbgs() << "CONVERT TO VECTOR: " << *AI << "\n  TYPE = "
                   << *VectorTy << '\n');
      NewTy = VectorTy;  // Use the vector type.
    } else {
      DEBUG(dbgs() << "CONVERT TO SCALAR INTEGER: " << *AI << "\n");
      // Create and insert the integer alloca.
      NewTy = IntegerType::get(AI->getContext(), AllocaSize*8);
    }
    AllocaInst *NewAI = new AllocaInst(NewTy, 0, "", AI->getParent()->begin());
    ConvertUsesToScalar(AI, NewAI, 0);
    return NewAI;
  }

  bool CanConvertToScalar(Value *V, uint64_t Offset);
  void MergeInType(const Type *In, uint64_t Offset);
  void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset);

  Value *ConvertScalar_ExtractValue(Value *NV, const Type *ToType,
                                    uint64_t Offset, IRBuilder<> &Builder);
  Value *ConvertScalar_InsertValue(Value *StoredVal, Value *ExistingVal,
                                   uint64_t Offset, IRBuilder<> &Builder);
};
} // end anonymous namespace.



// performScalarRepl - This algorithm is a simple worklist driven algorithm,
// which runs on all of the alloca instructions in the function, removing them
// if they are only used by getelementptr instructions.
//
bool SROA::performScalarRepl(Function &F) {
  std::vector<AllocaInst*> WorkList;

  // Scan the entry basic block, adding allocas to the worklist.
  BasicBlock &BB = F.getEntryBlock();
  for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
    if (AllocaInst *A = dyn_cast<AllocaInst>(I))
      WorkList.push_back(A);

  // Process the worklist
  bool Changed = false;
  while (!WorkList.empty()) {
    AllocaInst *AI = WorkList.back();
    WorkList.pop_back();

    // Handle dead allocas trivially.  These can be formed by SROA'ing arrays
    // with unused elements.
    if (AI->use_empty()) {
      AI->eraseFromParent();
      Changed = true;
      continue;
    }

    // If this alloca is impossible for us to promote, reject it early.
    if (AI->isArrayAllocation() || !AI->getAllocatedType()->isSized())
      continue;

    // Check to see if this allocation is only modified by a memcpy/memmove from
    // a constant global.  If this is the case, we can change all users to use
    // the constant global instead.  This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
    // is only subsequently read.
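    // For example (an illustrative sketch), the front-end lowers such an
    // initializer to a memcpy from a private constant global:
    //
    //   @init = private constant [9 x i32] [i32 1, i32 2, ...]
    //   %A = alloca [9 x i32]
    //   ; ...memcpy from @init to %A...
    //
    // If 'A' is only read afterwards, all uses of %A can be replaced with a
    // (bitcast of) @init, and both the memcpy and the alloca can be deleted.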
    if (MemTransferInst *TheCopy = isOnlyCopiedFromConstantGlobal(AI)) {
      DEBUG(dbgs() << "Found alloca equal to global: " << *AI << '\n');
      DEBUG(dbgs() << "  memcpy = " << *TheCopy << '\n');
      Constant *TheSrc = cast<Constant>(TheCopy->getSource());
      AI->replaceAllUsesWith(ConstantExpr::getBitCast(TheSrc, AI->getType()));
      TheCopy->eraseFromParent();  // Don't mutate the global.
      AI->eraseFromParent();
      ++NumGlobals;
      Changed = true;
      continue;
    }

    // Check to see if we can perform the core SROA transformation.  We cannot
    // transform the allocation instruction if it is an array allocation
    // (allocations OF arrays are ok though), and an allocation of a scalar
    // value cannot be decomposed at all.
    uint64_t AllocaSize = TD->getTypeAllocSize(AI->getAllocatedType());

    // Do not promote [0 x %struct].
    if (AllocaSize == 0) continue;

    // Do not promote any struct whose size is too big.
    if (AllocaSize > SRThreshold) continue;

    // If the alloca looks like a good candidate for scalar replacement, and if
    // all its users can be transformed, then split up the aggregate into its
    // separate elements.
    if (ShouldAttemptScalarRepl(AI) && isSafeAllocaToScalarRepl(AI)) {
      DoScalarReplacement(AI, WorkList);
      Changed = true;
      continue;
    }

    // Check to see if we can turn this aggregate value (potentially with
    // casts) into a simple scalar value that can be mem2reg'd into a register.
    // IsNotTrivial tracks whether this is something that mem2reg could have
    // promoted itself.  If so, we don't want to transform it needlessly.  Note
    // that we can't just check based on the type: the alloca may be of an i32
    // but that has pointer arithmetic to set byte 3 of it or something.
    if (AllocaInst *NewAI =
          ConvertToScalarInfo((unsigned)AllocaSize, *TD).TryConvert(AI)) {
      NewAI->takeName(AI);
      AI->eraseFromParent();
      ++NumConverted;
      Changed = true;
      continue;
    }

    // Otherwise, couldn't process this alloca.
  }

  return Changed;
}

/// DoScalarReplacement - This alloca satisfied the isSafeAllocaToScalarRepl
/// predicate; do SROA on it now.
void SROA::DoScalarReplacement(AllocaInst *AI,
                               std::vector<AllocaInst*> &WorkList) {
  DEBUG(dbgs() << "Found inst to SROA: " << *AI << '\n');
  SmallVector<AllocaInst*, 32> ElementAllocas;
  if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
    ElementAllocas.reserve(ST->getNumContainedTypes());
    for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0,
                                      AI->getAlignment(),
                                      AI->getName() + "." + Twine(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  } else {
    const ArrayType *AT = cast<ArrayType>(AI->getAllocatedType());
    ElementAllocas.reserve(AT->getNumElements());
    const Type *ElTy = AT->getElementType();
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(),
                                      AI->getName() + "." + Twine(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  }

  // Now that we have created the new alloca instructions, rewrite all the
  // uses of the old alloca.
  RewriteForScalarRepl(AI, AI, 0, ElementAllocas);

  // Now erase any instructions that were made dead while rewriting the alloca.
  DeleteDeadInstructions();
  AI->eraseFromParent();

  NumReplaced++;
}

/// DeleteDeadInstructions - Erase instructions on the DeadInsts list,
/// recursively including all their operands that become trivially dead.
void SROA::DeleteDeadInstructions() {
  while (!DeadInsts.empty()) {
    Instruction *I = cast<Instruction>(DeadInsts.pop_back_val());

    for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
      if (Instruction *U = dyn_cast<Instruction>(*OI)) {
        // Zero out the operand and see if it becomes trivially dead.
        // (But, don't add allocas to the dead instruction list -- they are
        // already on the worklist and will be deleted separately.)
        *OI = 0;
        if (isInstructionTriviallyDead(U) && !isa<AllocaInst>(U))
          DeadInsts.push_back(U);
      }

    I->eraseFromParent();
  }
}

/// isSafeForScalarRepl - Check if instruction I is a safe use with regard to
/// performing scalar replacement of alloca AI.  The results are flagged in
/// the Info parameter.  Offset indicates the position within AI that is
/// referenced by this instruction.
void SROA::isSafeForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
                               AllocaInfo &Info) {
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI!=E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    if (BitCastInst *BC = dyn_cast<BitCastInst>(User)) {
      isSafeForScalarRepl(BC, AI, Offset, Info);
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
      uint64_t GEPOffset = Offset;
      isSafeGEP(GEPI, AI, GEPOffset, Info);
      if (!Info.isUnsafe)
        isSafeForScalarRepl(GEPI, AI, GEPOffset, Info);
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
      ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
      if (Length)
        isSafeMemAccess(AI, Offset, Length->getZExtValue(), 0,
                        UI.getOperandNo() == 0, Info);
      else
        MarkUnsafe(Info);
    } else if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      if (!LI->isVolatile()) {
        const Type *LIType = LI->getType();
        isSafeMemAccess(AI, Offset, TD->getTypeAllocSize(LIType),
                        LIType, false, Info);
      } else
        MarkUnsafe(Info);
    } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // Store is ok if storing INTO the pointer, not storing the pointer
      if (!SI->isVolatile() && SI->getOperand(0) != I) {
        const Type *SIType = SI->getOperand(0)->getType();
        isSafeMemAccess(AI, Offset, TD->getTypeAllocSize(SIType),
                        SIType, true, Info);
      } else
        MarkUnsafe(Info);
    } else {
      DEBUG(errs() << "  Transformation preventing inst: " << *User << '\n');
      MarkUnsafe(Info);
    }
    if (Info.isUnsafe) return;
  }
}

/// isSafeGEP - Check if a GEP instruction can be handled for scalar
/// replacement.  It is safe when all the indices are constant, in-bounds
/// references, and when the resulting offset corresponds to an element within
/// the alloca type.  The results are flagged in the Info parameter.  Upon
/// return, Offset is adjusted as specified by the GEP indices.
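///
/// For example (illustrative), given %A = alloca [4 x i32]:
///   getelementptr [4 x i32]* %A, i32 0, i32 2    ; safe: constant index
///   getelementptr [4 x i32]* %A, i32 0, i32 %i   ; unsafe: variable index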
void SROA::isSafeGEP(GetElementPtrInst *GEPI, AllocaInst *AI,
                     uint64_t &Offset, AllocaInfo &Info) {
  gep_type_iterator GEPIt = gep_type_begin(GEPI), E = gep_type_end(GEPI);
  if (GEPIt == E)
    return;

  // Walk through the GEP type indices, checking the types that this indexes
  // into.
  for (; GEPIt != E; ++GEPIt) {
    // Ignore struct elements, no extra checking needed for these.
    if ((*GEPIt)->isStructTy())
      continue;

    ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPIt.getOperand());
    if (!IdxVal)
      return MarkUnsafe(Info);
  }

  // Compute the offset due to this GEP and check if the alloca has a
  // component element at that offset.
  SmallVector<Value*, 8> Indices(GEPI->op_begin() + 1, GEPI->op_end());
  Offset += TD->getIndexedOffset(GEPI->getPointerOperandType(),
                                 &Indices[0], Indices.size());
  if (!TypeHasComponent(AI->getAllocatedType(), Offset, 0))
    MarkUnsafe(Info);
}

/// isSafeMemAccess - Check if a load/store/memcpy operates on the entire AI
/// alloca or has an offset and size that corresponds to a component element
/// within it.  The offset checked here may have been formed from a GEP with a
/// pointer bitcasted to a different type.
void SROA::isSafeMemAccess(AllocaInst *AI, uint64_t Offset, uint64_t MemSize,
                           const Type *MemOpType, bool isStore,
                           AllocaInfo &Info) {
  // Check if this is a load/store of the entire alloca.
  if (Offset == 0 && MemSize == TD->getTypeAllocSize(AI->getAllocatedType())) {
    bool UsesAggregateType = (MemOpType == AI->getAllocatedType());
    // This is safe for MemIntrinsics (where MemOpType is 0), integer types
    // (which are essentially the same as the MemIntrinsics, especially with
    // regard to copying padding between elements), or references using the
    // aggregate type of the alloca.
    if (!MemOpType || MemOpType->isIntegerTy() || UsesAggregateType) {
      if (!UsesAggregateType) {
        if (isStore)
          Info.isMemCpyDst = true;
        else
          Info.isMemCpySrc = true;
      }
      return;
    }
  }
  // Check if the offset/size correspond to a component within the alloca type.
  const Type *T = AI->getAllocatedType();
  if (TypeHasComponent(T, Offset, MemSize))
    return;

  return MarkUnsafe(Info);
}

/// TypeHasComponent - Return true if T has a component type with the
/// specified offset and size.  If Size is zero, do not check the size.
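///
/// For example (illustrative), for T = { i32, [2 x i16] } with a typical
/// 32-bit layout, Offset 4 with Size 2 lands exactly on the first i16 of
/// the array, so this returns true; Offset 2 with Size 4 straddles the
/// boundary between the i32 and the array, so it returns false.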
bool SROA::TypeHasComponent(const Type *T, uint64_t Offset, uint64_t Size) {
  const Type *EltTy;
  uint64_t EltSize;
  if (const StructType *ST = dyn_cast<StructType>(T)) {
    const StructLayout *Layout = TD->getStructLayout(ST);
    unsigned EltIdx = Layout->getElementContainingOffset(Offset);
    EltTy = ST->getContainedType(EltIdx);
    EltSize = TD->getTypeAllocSize(EltTy);
    Offset -= Layout->getElementOffset(EltIdx);
  } else if (const ArrayType *AT = dyn_cast<ArrayType>(T)) {
    EltTy = AT->getElementType();
    EltSize = TD->getTypeAllocSize(EltTy);
    if (Offset >= AT->getNumElements() * EltSize)
      return false;
    Offset %= EltSize;
  } else {
    return false;
  }
  if (Offset == 0 && (Size == 0 || EltSize == Size))
    return true;
  // Check if the component spans multiple elements.
  if (Offset + Size > EltSize)
    return false;
  return TypeHasComponent(EltTy, Offset, Size);
}

/// RewriteForScalarRepl - Alloca AI is being split into NewElts, so rewrite
/// the instruction I, which references it, to use the separate elements.
/// Offset indicates the position within AI that is referenced by this
/// instruction.
void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
                                SmallVector<AllocaInst*, 32> &NewElts) {
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI!=E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    if (BitCastInst *BC = dyn_cast<BitCastInst>(User)) {
      RewriteBitCast(BC, AI, Offset, NewElts);
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
      RewriteGEP(GEPI, AI, Offset, NewElts);
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
      ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
      uint64_t MemSize = Length->getZExtValue();
      if (Offset == 0 &&
          MemSize == TD->getTypeAllocSize(AI->getAllocatedType()))
        RewriteMemIntrinUserOfAlloca(MI, I, AI, NewElts);
      // Otherwise the intrinsic can only touch a single element and the
      // address operand will be updated, so nothing else needs to be done.
    } else if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      const Type *LIType = LI->getType();
      if (LIType == AI->getAllocatedType()) {
        // Replace:
        //   %res = load { i32, i32 }* %alloc
        // with:
        //   %load.0 = load i32* %alloc.0
        //   %insert.0 = insertvalue { i32, i32 } undef, i32 %load.0, 0
        //   %load.1 = load i32* %alloc.1
        //   %insert = insertvalue { i32, i32 } %insert.0, i32 %load.1, 1
        // (Also works for arrays instead of structs)
        Value *Insert = UndefValue::get(LIType);
        for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
          Value *Load = new LoadInst(NewElts[i], "load", LI);
          Insert = InsertValueInst::Create(Insert, Load, i, "insert", LI);
        }
        LI->replaceAllUsesWith(Insert);
        DeadInsts.push_back(LI);
      } else if (LIType->isIntegerTy() &&
                 TD->getTypeAllocSize(LIType) ==
                 TD->getTypeAllocSize(AI->getAllocatedType())) {
        // If this is a load of the entire alloca to an integer, rewrite it.
        RewriteLoadUserOfWholeAlloca(LI, AI, NewElts);
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      Value *Val = SI->getOperand(0);
      const Type *SIType = Val->getType();
      if (SIType == AI->getAllocatedType()) {
        // Replace:
        //   store { i32, i32 } %val, { i32, i32 }* %alloc
        // with:
        //   %val.0 = extractvalue { i32, i32 } %val, 0
        //   store i32 %val.0, i32* %alloc.0
        //   %val.1 = extractvalue { i32, i32 } %val, 1
        //   store i32 %val.1, i32* %alloc.1
        // (Also works for arrays instead of structs)
        for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
          Value *Extract = ExtractValueInst::Create(Val, i, Val->getName(), SI);
          new StoreInst(Extract, NewElts[i], SI);
        }
        DeadInsts.push_back(SI);
      } else if (SIType->isIntegerTy() &&
                 TD->getTypeAllocSize(SIType) ==
                 TD->getTypeAllocSize(AI->getAllocatedType())) {
        // If this is a store of the entire alloca from an integer, rewrite it.
        RewriteStoreUserOfWholeAlloca(SI, AI, NewElts);
      }
    }
  }
}

/// RewriteBitCast - Update a bitcast reference to the alloca being replaced
/// and recursively continue updating all of its uses.
void SROA::RewriteBitCast(BitCastInst *BC, AllocaInst *AI, uint64_t Offset,
                          SmallVector<AllocaInst*, 32> &NewElts) {
  RewriteForScalarRepl(BC, AI, Offset, NewElts);
  if (BC->getOperand(0) != AI)
    return;

  // The bitcast references the original alloca.  Replace its uses with
  // references to the first new element alloca.
  Instruction *Val = NewElts[0];
  if (Val->getType() != BC->getDestTy()) {
    Val = new BitCastInst(Val, BC->getDestTy(), "", BC);
    Val->takeName(BC);
  }
  BC->replaceAllUsesWith(Val);
  DeadInsts.push_back(BC);
}

/// FindElementAndOffset - Return the index of the element containing Offset
/// within the specified type, which must be either a struct or an array.
/// Sets T to the type of the element and Offset to the offset within that
/// element.  IdxTy is set to the type of the index result to be used in a
/// GEP instruction.
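///
/// For example (illustrative), with T = [4 x i32] and Offset = 10, this
/// returns index 2 (as an i64), sets T to i32, and reduces Offset to 2,
/// the remaining offset within that element.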
uint64_t SROA::FindElementAndOffset(const Type *&T, uint64_t &Offset,
                                    const Type *&IdxTy) {
  uint64_t Idx = 0;
  if (const StructType *ST = dyn_cast<StructType>(T)) {
    const StructLayout *Layout = TD->getStructLayout(ST);
    Idx = Layout->getElementContainingOffset(Offset);
    T = ST->getContainedType(Idx);
    Offset -= Layout->getElementOffset(Idx);
    IdxTy = Type::getInt32Ty(T->getContext());
    return Idx;
  }
  const ArrayType *AT = cast<ArrayType>(T);
  T = AT->getElementType();
  uint64_t EltSize = TD->getTypeAllocSize(T);
  Idx = Offset / EltSize;
  Offset -= Idx * EltSize;
  IdxTy = Type::getInt64Ty(T->getContext());
  return Idx;
}

/// RewriteGEP - Check if this GEP instruction moves the pointer across
/// elements of the alloca that are being split apart, and if so, rewrite
/// the GEP to be relative to the new element.
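///
/// For example (illustrative), if %A = alloca [2 x { i32, i32 }] is split
/// into %A.0 and %A.1, a GEP that addresses a field of element 1 is
/// rewritten to index off of the new alloca %A.1 instead of %A.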
void SROA::RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset,
                      SmallVector<AllocaInst*, 32> &NewElts) {
  uint64_t OldOffset = Offset;
  SmallVector<Value*, 8> Indices(GEPI->op_begin() + 1, GEPI->op_end());
  Offset += TD->getIndexedOffset(GEPI->getPointerOperandType(),
                                 &Indices[0], Indices.size());

  RewriteForScalarRepl(GEPI, AI, Offset, NewElts);

  const Type *T = AI->getAllocatedType();
  const Type *IdxTy;
  uint64_t OldIdx = FindElementAndOffset(T, OldOffset, IdxTy);
  if (GEPI->getOperand(0) == AI)
    OldIdx = ~0ULL; // Force the GEP to be rewritten.

  T = AI->getAllocatedType();
  uint64_t EltOffset = Offset;
  uint64_t Idx = FindElementAndOffset(T, EltOffset, IdxTy);

  // If this GEP does not move the pointer across elements of the alloca
  // being split, then it does not need to be rewritten.
  if (Idx == OldIdx)
    return;

  const Type *i32Ty = Type::getInt32Ty(AI->getContext());
  SmallVector<Value*, 8> NewArgs;
  NewArgs.push_back(Constant::getNullValue(i32Ty));
  while (EltOffset != 0) {
    uint64_t EltIdx = FindElementAndOffset(T, EltOffset, IdxTy);
    NewArgs.push_back(ConstantInt::get(IdxTy, EltIdx));
  }
  Instruction *Val = NewElts[Idx];
  if (NewArgs.size() > 1) {
    Val = GetElementPtrInst::CreateInBounds(Val, NewArgs.begin(),
                                            NewArgs.end(), "", GEPI);
    Val->takeName(GEPI);
  }
  if (Val->getType() != GEPI->getType())
    Val = new BitCastInst(Val, GEPI->getType(), Val->getName(), GEPI);
  GEPI->replaceAllUsesWith(Val);
  DeadInsts.push_back(GEPI);
}

/// RewriteMemIntrinUserOfAlloca - MI is a memcpy/memset/memmove from or to AI.
/// Rewrite it to copy or set the elements of the scalarized memory.
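///
/// For example (an illustrative sketch), a memset of zero over a whole
/// { i32, i32 } alloca that has been split into %A.0 and %A.1 becomes two
/// scalar stores:
///
///   store i32 0, i32* %A.0
///   store i32 0, i32* %A.1
///
/// Elements that are not single-value types are instead handled with
/// smaller, per-element memset/memcpy calls.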
void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
                                        AllocaInst *AI,
                                        SmallVector<AllocaInst*, 32> &NewElts) {
  // If this is a memcpy/memmove, construct the other pointer as the
  // appropriate type.  The "Other" pointer is the pointer that goes to memory
  // that doesn't have anything to do with the alloca that we are promoting. For
  // memset, this Value* stays null.
  Value *OtherPtr = 0;
  LLVMContext &Context = MI->getContext();
  unsigned MemAlignment = MI->getAlignment();
  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { // memmove/memcpy
    if (Inst == MTI->getRawDest())
      OtherPtr = MTI->getRawSource();
    else {
      assert(Inst == MTI->getRawSource());
      OtherPtr = MTI->getRawDest();
    }
  }

  // If there is another pointer, we want to convert it to the same pointer
  // type as AI has, so we can GEP through it safely.
  if (OtherPtr) {

    // Remove bitcasts and all-zero GEPs from OtherPtr.  This is an
    // optimization, but it's also required to detect the corner case where
    // both pointer operands are referencing the same memory, and where
    // OtherPtr may be a bitcast or GEP that is currently being rewritten.
    // (This function is only called for mem intrinsics that access the whole
    // aggregate, so non-zero GEPs are not an issue here.)
    while (1) {
      if (BitCastInst *BC = dyn_cast<BitCastInst>(OtherPtr)) {
        OtherPtr = BC->getOperand(0);
        continue;
      }
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(OtherPtr)) {
        // All zero GEPs are effectively bitcasts.
        if (GEP->hasAllZeroIndices()) {
          OtherPtr = GEP->getOperand(0);
          continue;
        }
      }
      break;
    }
    // Copying the alloca to itself is a no-op: just delete it.
    if (OtherPtr == AI || OtherPtr == NewElts[0]) {
      // This code will run twice for a no-op memcpy -- once for each operand.
      // Put only one reference to MI on the DeadInsts list.
      for (SmallVector<Value*, 32>::const_iterator I = DeadInsts.begin(),
             E = DeadInsts.end(); I != E; ++I)
        if (*I == MI) return;
      DeadInsts.push_back(MI);
      return;
    }

    if (ConstantExpr *BCE = dyn_cast<ConstantExpr>(OtherPtr))
      if (BCE->getOpcode() == Instruction::BitCast)
        OtherPtr = BCE->getOperand(0);

    // If the pointer is not the right type, insert a bitcast to the right
    // type.
    if (OtherPtr->getType() != AI->getType())
      OtherPtr = new BitCastInst(OtherPtr, AI->getType(), OtherPtr->getName(),
                                 MI);
  }

  // Process each element of the aggregate.
  Value *TheFn = MI->getCalledValue();
  const Type *BytePtrTy = MI->getRawDest()->getType();
  bool SROADest = MI->getRawDest() == Inst;

  Constant *Zero = Constant::getNullValue(Type::getInt32Ty(MI->getContext()));

  for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
    // If this is a memcpy/memmove, emit a GEP of the other element address.
    Value *OtherElt = 0;
    unsigned OtherEltAlign = MemAlignment;

    if (OtherPtr) {
      Value *Idx[2] = { Zero,
                      ConstantInt::get(Type::getInt32Ty(MI->getContext()), i) };
      OtherElt = GetElementPtrInst::CreateInBounds(OtherPtr, Idx, Idx + 2,
                                              OtherPtr->getName()+"."+Twine(i),
                                                   MI);
      uint64_t EltOffset;
      const PointerType *OtherPtrTy = cast<PointerType>(OtherPtr->getType());
      if (const StructType *ST =
            dyn_cast<StructType>(OtherPtrTy->getElementType())) {
        EltOffset = TD->getStructLayout(ST)->getElementOffset(i);
      } else {
        const Type *EltTy =
          cast<SequentialType>(OtherPtr->getType())->getElementType();
        EltOffset = TD->getTypeAllocSize(EltTy)*i;
      }

      // The alignment of the other pointer is the guaranteed alignment of the
      // element, which is affected by both the known alignment of the whole
      // mem intrinsic and the alignment of the element.  If the alignment of
      // the memcpy (e.g.) is 32 but the element is at a 4-byte offset, then
      // the known alignment is just 4 bytes.
      OtherEltAlign = (unsigned)MinAlign(OtherEltAlign, EltOffset);
    }

    Value *EltPtr = NewElts[i];
    const Type *EltTy = cast<PointerType>(EltPtr->getType())->getElementType();

    // If we got down to a scalar, insert a load or store as appropriate.
    if (EltTy->isSingleValueType()) {
      if (isa<MemTransferInst>(MI)) {
        if (SROADest) {
          // From Other to Alloca.
          Value *Elt = new LoadInst(OtherElt, "tmp", false, OtherEltAlign, MI);
          new StoreInst(Elt, EltPtr, MI);
        } else {
          // From Alloca to Other.
          Value *Elt = new LoadInst(EltPtr, "tmp", MI);
          new StoreInst(Elt, OtherElt, false, OtherEltAlign, MI);
        }
        continue;
      }
      assert(isa<MemSetInst>(MI));

      // If the stored element is zero (common case), just store a null
      // constant.
      Constant *StoreVal;
      if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getOperand(1))) {
        if (CI->isZero()) {
          StoreVal = Constant::getNullValue(EltTy);  // 0.0, null, 0, <0,0>
        } else {
          // If EltTy is a vector type, get the element type.
          const Type *ValTy = EltTy->getScalarType();

          // Construct an integer with the right value.
          unsigned EltSize = TD->getTypeSizeInBits(ValTy);
          APInt OneVal(EltSize, CI->getZExtValue());
          APInt TotalVal(OneVal);
          // Set each byte.
          for (unsigned i = 0; 8*i < EltSize; ++i) {
            TotalVal = TotalVal.shl(8);
            TotalVal |= OneVal;
          }
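          // (For example, a memset byte of 0xAB splatted over an i32
          // element produces the constant 0xABABABAB.)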

          // Convert the integer value to the appropriate type.
          StoreVal = ConstantInt::get(Context, TotalVal);
          if (ValTy->isPointerTy())
            StoreVal = ConstantExpr::getIntToPtr(StoreVal, ValTy);
          else if (ValTy->isFloatingPointTy())
            StoreVal = ConstantExpr::getBitCast(StoreVal, ValTy);
          assert(StoreVal->getType() == ValTy && "Type mismatch!");

          // If the requested value was a vector constant, create it.
          if (EltTy != ValTy) {
            // EltTy must be the vector type here, since ValTy is its scalar
            // element type.
            unsigned NumElts = cast<VectorType>(EltTy)->getNumElements();
            SmallVector<Constant*, 16> Elts(NumElts, StoreVal);
            StoreVal = ConstantVector::get(&Elts[0], NumElts);
          }
        }
        new StoreInst(StoreVal, EltPtr, MI);
        continue;
      }
      // Otherwise, if we're storing a byte variable, use a memset call for
      // this element.
    }

    // Cast the element pointer to BytePtrTy.
    if (EltPtr->getType() != BytePtrTy)
      EltPtr = new BitCastInst(EltPtr, BytePtrTy, EltPtr->getName(), MI);

    // Cast the other pointer (if we have one) to BytePtrTy.
    if (OtherElt && OtherElt->getType() != BytePtrTy) {
      // Preserve address space of OtherElt
      const PointerType* OtherPTy = cast<PointerType>(OtherElt->getType());
      const PointerType* PTy = cast<PointerType>(BytePtrTy);
      if (OtherPTy->getElementType() != PTy->getElementType()) {
        Type *NewOtherPTy = PointerType::get(PTy->getElementType(),
                                             OtherPTy->getAddressSpace());
        OtherElt = new BitCastInst(OtherElt, NewOtherPTy,
                                   OtherElt->getNameStr(), MI);
      }
    }

    unsigned EltSize = TD->getTypeAllocSize(EltTy);

    // Finally, insert the meminst for this element.
    if (isa<MemTransferInst>(MI)) {
      Value *Ops[] = {
        SROADest ? EltPtr : OtherElt,  // Dest ptr
        SROADest ? OtherElt : EltPtr,  // Src ptr
        ConstantInt::get(MI->getOperand(2)->getType(), EltSize), // Size
        // Align
        ConstantInt::get(Type::getInt32Ty(MI->getContext()), OtherEltAlign),
        MI->getVolatileCst()
      };
      // In case we fold the address space overloaded memcpy of A to B
      // with memcpy of B to C, change the function to be a memcpy of A to C.
      const Type *Tys[] = { Ops[0]->getType(), Ops[1]->getType(),
                            Ops[2]->getType() };
      Module *M = MI->getParent()->getParent()->getParent();
      TheFn = Intrinsic::getDeclaration(M, MI->getIntrinsicID(), Tys, 3);
      CallInst::Create(TheFn, Ops, Ops + 5, "", MI);
    } else {
      assert(isa<MemSetInst>(MI));
      Value *Ops[] = {
        EltPtr, MI->getOperand(1),  // Dest, Value,
        ConstantInt::get(MI->getOperand(2)->getType(), EltSize), // Size
        Zero,  // Align
        ConstantInt::get(Type::getInt1Ty(MI->getContext()), 0) // isVolatile
      };
      const Type *Tys[] = { Ops[0]->getType(), Ops[2]->getType() };
      Module *M = MI->getParent()->getParent()->getParent();
      TheFn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys, 2);
      CallInst::Create(TheFn, Ops, Ops + 5, "", MI);
    }
  }
  DeadInsts.push_back(MI);
}

/// RewriteStoreUserOfWholeAlloca - We found a store of an integer that
/// overwrites the entire allocation.  Extract out the pieces of the stored
/// integer and store them individually.
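///
/// For example (an illustrative sketch, little-endian layout), a store of
/// an i64 over an alloca of { i32, i32 } split into %A.0 and %A.1:
///
///   store i64 %val, i64* %tmp      ; %tmp is a bitcast of the alloca
/// becomes:
///   %t0 = trunc i64 %val to i32
///   store i32 %t0, i32* %A.0
///   %s1 = lshr i64 %val, 32
///   %t1 = trunc i64 %s1 to i32
///   store i32 %t1, i32* %A.1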
void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
                                         SmallVector<AllocaInst*, 32> &NewElts){
  // Extract each element out of the integer according to its structure offset
  // and store the element value to the individual alloca.
  Value *SrcVal = SI->getOperand(0);
  const Type *AllocaEltTy = AI->getAllocatedType();
  uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);

  // Handle tail padding by extending the operand
  if (TD->getTypeSizeInBits(SrcVal->getType()) != AllocaSizeBits)
    SrcVal = new ZExtInst(SrcVal,
                          IntegerType::get(SI->getContext(), AllocaSizeBits),
                          "", SI);

  DEBUG(dbgs() << "PROMOTING STORE TO WHOLE ALLOCA: " << *AI << '\n' << *SI
               << '\n');

  // There are two forms here: AI could be an array or struct.  Both cases
  // have different ways to compute the element offset.
  if (const StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
    const StructLayout *Layout = TD->getStructLayout(EltSTy);

    for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
      // Get the number of bits to shift SrcVal to get the value.
      const Type *FieldTy = EltSTy->getElementType(i);
      uint64_t Shift = Layout->getElementOffsetInBits(i);

      if (TD->isBigEndian())
        Shift = AllocaSizeBits-Shift-TD->getTypeAllocSizeInBits(FieldTy);

      Value *EltVal = SrcVal;
      if (Shift) {
        Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift);
        EltVal = BinaryOperator::CreateLShr(EltVal, ShiftVal,
                                            "sroa.store.elt", SI);
      }

      // Truncate down to an integer of the right size.
      uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);

      // Ignore zero sized fields like {}, they obviously contain no data.
      if (FieldSizeBits == 0) continue;

      if (FieldSizeBits != AllocaSizeBits)
        EltVal = new TruncInst(EltVal,
                             IntegerType::get(SI->getContext(), FieldSizeBits),
                              "", SI);
      Value *DestField = NewElts[i];
      if (EltVal->getType() == FieldTy) {
        // Storing to an integer field of this size, just do it.
      } else if (FieldTy->isFloatingPointTy() || FieldTy->isVectorTy()) {
        // Bitcast to the right element type (for fp/vector values).
        EltVal = new BitCastInst(EltVal, FieldTy, "", SI);
      } else {
        // Otherwise, bitcast the dest pointer (for aggregates).
        DestField = new BitCastInst(DestField,
                              PointerType::getUnqual(EltVal->getType()),
                                    "", SI);
      }
      new StoreInst(EltVal, DestField, SI);
    }

  } else {
    const ArrayType *ATy = cast<ArrayType>(AllocaEltTy);
    const Type *ArrayEltTy = ATy->getElementType();
    uint64_t ElementOffset = TD->getTypeAllocSizeInBits(ArrayEltTy);
    uint64_t ElementSizeBits = TD->getTypeSizeInBits(ArrayEltTy);

    uint64_t Shift;

    if (TD->isBigEndian())
      Shift = AllocaSizeBits-ElementOffset;
    else
      Shift = 0;

    for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
      // Ignore zero sized fields like {}, they obviously contain no data.
      if (ElementSizeBits == 0) continue;

      Value *EltVal = SrcVal;
      if (Shift) {
        Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift);
        EltVal = BinaryOperator::CreateLShr(EltVal, ShiftVal,
                                            "sroa.store.elt", SI);
      }

      // Truncate down to an integer of the right size.
      if (ElementSizeBits != AllocaSizeBits)
        EltVal = new TruncInst(EltVal,
                               IntegerType::get(SI->getContext(),
                                                ElementSizeBits),"",SI);
      Value *DestField = NewElts[i];
      if (EltVal->getType() == ArrayEltTy) {
        // Storing to an integer field of this size, just do it.
      } else if (ArrayEltTy->isFloatingPointTy() ||
                 ArrayEltTy->isVectorTy()) {
        // Bitcast to the right element type (for fp/vector values).
        EltVal = new BitCastInst(EltVal, ArrayEltTy, "", SI);
      } else {
        // Otherwise, bitcast the dest pointer (for aggregates).
        DestField = new BitCastInst(DestField,
                              PointerType::getUnqual(EltVal->getType()),
                                    "", SI);
      }
      new StoreInst(EltVal, DestField, SI);

      if (TD->isBigEndian())
        Shift -= ElementOffset;
      else
        Shift += ElementOffset;
    }
  }

  DeadInsts.push_back(SI);
}

/// RewriteLoadUserOfWholeAlloca - We found a load of the entire allocation to
/// an integer.  Load the individual pieces to form the aggregate value.
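///
/// For example (an illustrative sketch, little-endian layout), a load of an
/// i64 from an alloca of { i32, i32 } split into %A.0 and %A.1 becomes:
///
///   %e0 = load i32* %A.0
///   %z0 = zext i32 %e0 to i64
///   %e1 = load i32* %A.1
///   %z1 = zext i32 %e1 to i64
///   %s1 = shl i64 %z1, 32
///   %res = or i64 %s1, %z0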
void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
                                        SmallVector<AllocaInst*, 32> &NewElts) {
  // Extract each element out of the NewElts according to its structure offset
  // and form the result value.
  const Type *AllocaEltTy = AI->getAllocatedType();
  uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);

  DEBUG(dbgs() << "PROMOTING LOAD OF WHOLE ALLOCA: " << *AI << '\n' << *LI
               << '\n');

  // There are two forms here: AI could be an array or struct.  Both cases
  // have different ways to compute the element offset.
  const StructLayout *Layout = 0;
  uint64_t ArrayEltBitOffset = 0;
  if (const StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
    Layout = TD->getStructLayout(EltSTy);
  } else {
    const Type *ArrayEltTy = cast<ArrayType>(AllocaEltTy)->getElementType();
    ArrayEltBitOffset = TD->getTypeAllocSizeInBits(ArrayEltTy);
  }

  Value *ResultVal =
    Constant::getNullValue(IntegerType::get(LI->getContext(), AllocaSizeBits));

  for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
    // Load the value from the alloca.  If the NewElt is an aggregate, cast
    // the pointer to an integer of the same size before doing the load.
    Value *SrcField = NewElts[i];
    const Type *FieldTy =
      cast<PointerType>(SrcField->getType())->getElementType();
    uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);

    // Ignore zero sized fields like {}, they obviously contain no data.
    if (FieldSizeBits == 0) continue;

    const IntegerType *FieldIntTy = IntegerType::get(LI->getContext(),
                                                     FieldSizeBits);
    if (!FieldTy->isIntegerTy() && !FieldTy->isFloatingPointTy() &&
        !FieldTy->isVectorTy())
      SrcField = new BitCastInst(SrcField,
                                 PointerType::getUnqual(FieldIntTy),
                                 "", LI);
    SrcField = new LoadInst(SrcField, "sroa.load.elt", LI);

    // If SrcField is a fp or vector of the right size but that isn't an
    // integer type, bitcast to an integer so we can shift it.
    if (SrcField->getType() != FieldIntTy)
      SrcField = new BitCastInst(SrcField, FieldIntTy, "", LI);

    // Zero extend the field to be the same size as the final alloca so that
    // we can shift and insert it.
    if (SrcField->getType() != ResultVal->getType())
      SrcField = new ZExtInst(SrcField, ResultVal->getType(), "", LI);

    // Determine the number of bits to shift SrcField.
    uint64_t Shift;
    if (Layout) // Struct case.
      Shift = Layout->getElementOffsetInBits(i);
    else  // Array case.
      Shift = i*ArrayEltBitOffset;

    if (TD->isBigEndian())
      Shift = AllocaSizeBits-Shift-FieldIntTy->getBitWidth();

    if (Shift) {
      Value *ShiftVal = ConstantInt::get(SrcField->getType(), Shift);
      SrcField = BinaryOperator::CreateShl(SrcField, ShiftVal, "", LI);
    }

    ResultVal = BinaryOperator::CreateOr(SrcField, ResultVal, "", LI);
  }

  // Handle tail padding by truncating the result
  if (TD->getTypeSizeInBits(LI->getType()) != AllocaSizeBits)
    ResultVal = new TruncInst(ResultVal, LI->getType(), "", LI);

  LI->replaceAllUsesWith(ResultVal);
  DeadInsts.push_back(LI);
}

/// HasPadding - Return true if the specified type has any structure or
/// alignment padding, false otherwise.
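///
/// For example (illustrative), { i8, i32 } has three bytes of padding
/// between its fields on a target where i32 is 4-byte aligned, so this
/// returns true, while { i32, i32 } has none and returns false.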
static bool HasPadding(const Type *Ty, const TargetData &TD) {
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TD.getStructLayout(STy);
    unsigned PrevFieldBitOffset = 0;
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned FieldBitOffset = SL->getElementOffsetInBits(i);

      // Padding in sub-elements?
      if (HasPadding(STy->getElementType(i), TD))
        return true;

      // Check to see if there is any padding between this element and the
      // previous one.
      if (i) {
        unsigned PrevFieldEnd =
          PrevFieldBitOffset+TD.getTypeSizeInBits(STy->getElementType(i-1));
        if (PrevFieldEnd < FieldBitOffset)
          return true;
      }

      PrevFieldBitOffset = FieldBitOffset;
    }

    //  Check for tail padding.
    if (unsigned EltCount = STy->getNumElements()) {
      unsigned PrevFieldEnd = PrevFieldBitOffset +
                   TD.getTypeSizeInBits(STy->getElementType(EltCount-1));
      if (PrevFieldEnd < SL->getSizeInBits())
        return true;
    }

  } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    return HasPadding(ATy->getElementType(), TD);
  } else if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    return HasPadding(VTy->getElementType(), TD);
  }
  return TD.getTypeSizeInBits(Ty) != TD.getTypeAllocSizeInBits(Ty);
}

/// isSafeAllocaToScalarRepl - Check to see if the specified allocation of an
/// aggregate can be broken down into elements.  Return false if not, true
/// if safe.
bool SROA::isSafeAllocaToScalarRepl(AllocaInst *AI) {
  // Loop over the use list of the alloca.  We can only transform it if all of
  // the users are safe to transform.
  AllocaInfo Info;

  isSafeForScalarRepl(AI, AI, 0, Info);
  if (Info.isUnsafe) {
    DEBUG(dbgs() << "Cannot transform: " << *AI << '\n');
    return false;
  }

  // Okay, we know all the users are promotable.  If the aggregate is a memcpy
  // source and destination, we have to be careful.  In particular, the memcpy
  // could be moving around bits that live in the structure padding of the LLVM
  // types, but which may actually be used by the program.  In these cases, we
  // refuse to promote the struct.
  if (Info.isMemCpySrc && Info.isMemCpyDst &&
      HasPadding(AI->getAllocatedType(), *TD))
    return false;

  return true;
}

/// MergeInType - Add the 'In' type to the accumulated vector type (VectorTy)
/// so far at the offset specified by Offset (which is specified in bytes).
///
/// There are two cases we handle here:
///   1) A union of vector types of the same size and potentially its elements.
///      Here we turn element accesses into insert/extract element operations.
///      This promotes a <4 x float> with a store of float to the third element
///      into a <4 x float> that uses insert element.
///   2) A fully general blob of memory, which we turn into some (potentially
///      large) integer type with extract and insert operations where the loads
///      and stores would mutate the memory.
void ConvertToScalarInfo::MergeInType(const Type *In, uint64_t Offset) {
  // Remember if we saw a vector type.
  HadAVector |= In->isVectorTy();

  if (VectorTy && VectorTy->isVoidTy())
    return;

  // If this could be contributing to a vector, analyze it.

  // If the In type is a vector that is the same size as the alloca, see if it
  // matches the existing VectorTy.
  if (const VectorType *VInTy = dyn_cast<VectorType>(In)) {
    if (VInTy->getBitWidth()/8 == AllocaSize && Offset == 0) {
      // If we're storing/loading a vector of the right size, allow it as a
      // vector.  If this is the first vector we see, remember the type so
      // that we know the element size.
      if (VectorTy == 0)
        VectorTy = VInTy;
      return;
    }
  } else if (In->isFloatTy() || In->isDoubleTy() ||
             (In->isIntegerTy() && In->getPrimitiveSizeInBits() >= 8 &&
              isPowerOf2_32(In->getPrimitiveSizeInBits()))) {
    // If we're accessing something that could be an element of a vector, see
    // if the implied vector agrees with what we already have and if Offset is
    // compatible with it.
    unsigned EltSize = In->getPrimitiveSizeInBits()/8;
    if (Offset % EltSize == 0 && AllocaSize % EltSize == 0 &&
        (VectorTy == 0 ||
         cast<VectorType>(VectorTy)->getElementType()
               ->getPrimitiveSizeInBits()/8 == EltSize)) {
      if (VectorTy == 0)
        VectorTy = VectorType::get(In, AllocaSize/EltSize);
      return;
    }
  }

  // Otherwise, we have a case that we can't handle with an optimized vector
  // form.  We can still turn this into a large integer.
  VectorTy = Type::getVoidTy(In->getContext());
}

/// CanConvertToScalar - V is a pointer.  If we can convert the pointee and all
/// its accesses to a single vector type, return true and set VectorTy to
/// the new type.  If we could convert the alloca into a single promotable
/// integer, return true but set VectorTy to VoidTy.  Further, if the use is not
/// a completely trivial use that mem2reg could promote, set IsNotTrivial.
/// Offset is the current offset from the base of the alloca being analyzed.
///
/// If we see at least one access to the value as a vector type, set the
/// HadAVector flag.
1273bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset) {
1274  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
1275    Instruction *User = cast<Instruction>(*UI);
1276
1277    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
1278      // Don't break volatile loads.
1279      if (LI->isVolatile())
1280        return false;
1281      MergeInType(LI->getType(), Offset);
1282      continue;
1283    }
1284
1285    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
1286      // Storing the pointer, not into the value?
1287      if (SI->getOperand(0) == V || SI->isVolatile()) return false;
1288      MergeInType(SI->getOperand(0)->getType(), Offset);
1289      continue;
1290    }

    if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
      if (!CanConvertToScalar(BCI, Offset))
        return false;
      IsNotTrivial = true;
      continue;
    }

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // If this is a GEP with variable indices, we can't handle it.
      if (!GEP->hasAllConstantIndices())
        return false;

      // Compute the offset that this GEP adds to the pointer.
      SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
      uint64_t GEPOffset = TD.getIndexedOffset(GEP->getPointerOperandType(),
                                               &Indices[0], Indices.size());
      // See if all uses can be converted.
      if (!CanConvertToScalar(GEP, Offset+GEPOffset))
        return false;
      IsNotTrivial = true;
      continue;
    }

    // If this is a constant sized memset of a constant value (e.g. 0) we can
    // handle it.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
      // Store of constant value and constant size.
      if (isa<ConstantInt>(MSI->getValue()) &&
          isa<ConstantInt>(MSI->getLength())) {
        IsNotTrivial = true;
        continue;
      }
    }

    // If this is a memcpy or memmove into or out of the whole allocation, we
    // can handle it like a load or store of the scalar type.
    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) {
      if (ConstantInt *Len = dyn_cast<ConstantInt>(MTI->getLength()))
        if (Len->getZExtValue() == AllocaSize && Offset == 0) {
          IsNotTrivial = true;
          continue;
        }
    }

    // Otherwise, we cannot handle this!
    return false;
  }

  return true;
}

/// ConvertUsesToScalar - Convert all of the users of Ptr to use the new alloca
/// directly.  This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is the bit offset from the start of the original alloca.  By the
/// end of this, there should be no uses of Ptr.
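///
/// As a sketch (names invented), a load of the second element of a
/// [2 x float] alloca that was converted to a <2 x float> NewAI becomes:
///
///   %tmp = load <2 x float>* %NewAI
///   %val = extractelement <2 x float> %tmp, i32 1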
void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
                                              uint64_t Offset) {
  while (!Ptr->use_empty()) {
    Instruction *User = cast<Instruction>(Ptr->use_back());

    if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
      ConvertUsesToScalar(CI, NewAI, Offset);
      CI->eraseFromParent();
      continue;
    }

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // Compute the offset that this GEP adds to the pointer.
      SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
      uint64_t GEPOffset = TD.getIndexedOffset(GEP->getPointerOperandType(),
                                               &Indices[0], Indices.size());
      ConvertUsesToScalar(GEP, NewAI, Offset+GEPOffset*8);
      GEP->eraseFromParent();
      continue;
    }

    IRBuilder<> Builder(User->getParent(), User);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // The load is a bit extract from NewAI shifted right by Offset bits.
      Value *LoadedVal = Builder.CreateLoad(NewAI, "tmp");
      Value *NewLoadVal
        = ConvertScalar_ExtractValue(LoadedVal, LI->getType(), Offset, Builder);
      LI->replaceAllUsesWith(NewLoadVal);
      LI->eraseFromParent();
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      assert(SI->getOperand(0) != Ptr && "Consistency error!");
      Instruction *Old = Builder.CreateLoad(NewAI, NewAI->getName()+".in");
      Value *New = ConvertScalar_InsertValue(SI->getOperand(0), Old, Offset,
                                             Builder);
      Builder.CreateStore(New, NewAI);
      SI->eraseFromParent();

      // If the load we just inserted is now dead, then the inserted store
      // overwrote the entire thing.
      if (Old->use_empty())
        Old->eraseFromParent();
      continue;
    }

    // If this is a constant sized memset of a constant value (e.g. 0) we can
    // transform it into a store of the expanded constant value.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
      assert(MSI->getRawDest() == Ptr && "Consistency error!");
      unsigned NumBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
      if (NumBytes != 0) {
        unsigned Val = cast<ConstantInt>(MSI->getValue())->getZExtValue();

        // Compute the value replicated the right number of times.
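        // For example, a memset of 4 bytes with 0xAB yields the 32-bit
        // constant 0xABABABAB.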
        APInt APVal(NumBytes*8, Val);

        // Splat the value if non-zero.
        if (Val)
          for (unsigned i = 1; i != NumBytes; ++i)
            APVal |= APVal << 8;

        Instruction *Old = Builder.CreateLoad(NewAI, NewAI->getName()+".in");
        Value *New = ConvertScalar_InsertValue(
                                    ConstantInt::get(User->getContext(), APVal),
                                               Old, Offset, Builder);
        Builder.CreateStore(New, NewAI);

        // If the load we just inserted is now dead, then the memset overwrote
        // the entire thing.
        if (Old->use_empty())
          Old->eraseFromParent();
      }
      MSI->eraseFromParent();
      continue;
    }

    // If this is a memcpy or memmove into or out of the whole allocation, we
    // can handle it like a load or store of the scalar type.
    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) {
      assert(Offset == 0 && "must be store to start of alloca");

      // If the source and destination both refer to the same alloca, this is
      // a noop copy-to-self; just delete it.  Otherwise, emit a load and
      // store as appropriate.
      AllocaInst *OrigAI = cast<AllocaInst>(Ptr->getUnderlyingObject(0));

      if (MTI->getSource()->getUnderlyingObject(0) != OrigAI) {
        // Dest must be OrigAI, change this to be a load from the original
        // pointer (bitcasted), then a store to our new alloca.
        assert(MTI->getRawDest() == Ptr && "Neither use is of pointer?");
        Value *SrcPtr = MTI->getSource();
        SrcPtr = Builder.CreateBitCast(SrcPtr, NewAI->getType());

        LoadInst *SrcVal = Builder.CreateLoad(SrcPtr, "srcval");
        SrcVal->setAlignment(MTI->getAlignment());
        Builder.CreateStore(SrcVal, NewAI);
      } else if (MTI->getDest()->getUnderlyingObject(0) != OrigAI) {
        // Src must be OrigAI, change this to be a load from NewAI then a store
        // through the original dest pointer (bitcasted).
        assert(MTI->getRawSource() == Ptr && "Neither use is of pointer?");
        LoadInst *SrcVal = Builder.CreateLoad(NewAI, "srcval");

        Value *DstPtr = Builder.CreateBitCast(MTI->getDest(), NewAI->getType());
        StoreInst *NewStore = Builder.CreateStore(SrcVal, DstPtr);
        NewStore->setAlignment(MTI->getAlignment());
      } else {
        // Noop transfer. Src == Dst
      }

      MTI->eraseFromParent();
      continue;
    }

    llvm_unreachable("Unsupported operation!");
  }
}

/// ConvertScalar_ExtractValue - Extract a value of type ToType from an integer
/// or vector value FromVal, extracting the bits starting at the bit offset
/// specified by Offset.  This returns the value, which is of type ToType.
///
/// This happens when we are converting an "integer union" to a single
/// integer scalar, or when we are converting a "vector union" to a vector with
/// insert/extractelement instructions.
///
/// Offset is the bit offset from the start of the original alloca, i.e. the
/// amount the loaded bits must be shifted right.
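///
/// As a sketch (little-endian, names invented), extracting an i16 at bit
/// offset 16 from an i64 union is emitted as:
///
///   %s = lshr i64 %FromVal, 16
///   %t = trunc i64 %s to i16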
Value *ConvertToScalarInfo::
ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
                           uint64_t Offset, IRBuilder<> &Builder) {
  // If the load is of the whole new alloca, no conversion is needed.
  if (FromVal->getType() == ToType && Offset == 0)
    return FromVal;

  // If the result alloca is a vector type, this is either an element
  // access or a bitcast to another vector type of the same size.
  if (const VectorType *VTy = dyn_cast<VectorType>(FromVal->getType())) {
    if (ToType->isVectorTy())
      return Builder.CreateBitCast(FromVal, ToType, "tmp");

    // Otherwise it must be an element access.
    unsigned Elt = 0;
    if (Offset) {
      unsigned EltSize = TD.getTypeAllocSizeInBits(VTy->getElementType());
      Elt = Offset/EltSize;
      assert(EltSize*Elt == Offset && "Invalid modulus in validity checking");
    }
    // Return the element extracted out of it.
    Value *V = Builder.CreateExtractElement(FromVal, ConstantInt::get(
                    Type::getInt32Ty(FromVal->getContext()), Elt), "tmp");
    if (V->getType() != ToType)
      V = Builder.CreateBitCast(V, ToType, "tmp");
    return V;
  }

  // If ToType is a first-class aggregate, extract out each of the pieces and
  // use insertvalue instructions to form the FCA.
  if (const StructType *ST = dyn_cast<StructType>(ToType)) {
    const StructLayout &Layout = *TD.getStructLayout(ST);
    Value *Res = UndefValue::get(ST);
    for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
      Value *Elt = ConvertScalar_ExtractValue(FromVal, ST->getElementType(i),
                                        Offset+Layout.getElementOffsetInBits(i),
                                              Builder);
      Res = Builder.CreateInsertValue(Res, Elt, i, "tmp");
    }
    return Res;
  }

  if (const ArrayType *AT = dyn_cast<ArrayType>(ToType)) {
    uint64_t EltSize = TD.getTypeAllocSizeInBits(AT->getElementType());
    Value *Res = UndefValue::get(AT);
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      Value *Elt = ConvertScalar_ExtractValue(FromVal, AT->getElementType(),
                                              Offset+i*EltSize, Builder);
      Res = Builder.CreateInsertValue(Res, Elt, i, "tmp");
    }
    return Res;
  }

  // Otherwise, this must be a union that was converted to an integer value.
  const IntegerType *NTy = cast<IntegerType>(FromVal->getType());

  // If this is a big-endian system and the load is narrower than the
  // full alloca type, we need to do a shift to get the right bits.
  int ShAmt = 0;
  if (TD.isBigEndian()) {
    // On big-endian machines, the lowest bit is stored at the bit offset
    // from the pointer given by getTypeStoreSizeInBits.  This matters for
    // integers with a bitwidth that is not a multiple of 8.
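    // For example, extracting an i8 at Offset 0 from an i32 alloca on a
    // big-endian target uses ShAmt = 32 - 8 - 0 = 24: the byte at memory
    // offset 0 lives in the most significant bits.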
    ShAmt = TD.getTypeStoreSizeInBits(NTy) -
            TD.getTypeStoreSizeInBits(ToType) - Offset;
  } else {
    ShAmt = Offset;
  }

  // Note: a negative shift amount (which lshr does not define) is handled
  // here with a shl instead.  We do this to support (e.g.) loads off the
  // end of a structure where only some bits are used.
  if (ShAmt > 0 && (unsigned)ShAmt < NTy->getBitWidth())
    FromVal = Builder.CreateLShr(FromVal,
                                 ConstantInt::get(FromVal->getType(),
                                                           ShAmt), "tmp");
  else if (ShAmt < 0 && (unsigned)-ShAmt < NTy->getBitWidth())
    FromVal = Builder.CreateShl(FromVal,
                                ConstantInt::get(FromVal->getType(),
                                                          -ShAmt), "tmp");

  // Finally, unconditionally truncate the integer to the right width.
  unsigned LIBitWidth = TD.getTypeSizeInBits(ToType);
  if (LIBitWidth < NTy->getBitWidth())
    FromVal =
      Builder.CreateTrunc(FromVal, IntegerType::get(FromVal->getContext(),
                                                    LIBitWidth), "tmp");
  else if (LIBitWidth > NTy->getBitWidth())
    FromVal =
       Builder.CreateZExt(FromVal, IntegerType::get(FromVal->getContext(),
                                                    LIBitWidth), "tmp");

  // If the result is an integer, this is a trunc or bitcast.
  if (ToType->isIntegerTy()) {
    // Should be done.
  } else if (ToType->isFloatingPointTy() || ToType->isVectorTy()) {
    // Just do a bitcast, we know the sizes match up.
    FromVal = Builder.CreateBitCast(FromVal, ToType, "tmp");
  } else {
    // Otherwise must be a pointer.
    FromVal = Builder.CreateIntToPtr(FromVal, ToType, "tmp");
  }
  assert(FromVal->getType() == ToType && "Didn't convert right?");
  return FromVal;
}

/// ConvertScalar_InsertValue - Insert the value "SV" into the existing integer
/// or vector value "Old" at the offset specified by Offset.
///
/// This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is the bit offset from the start of the original alloca, i.e. the
/// amount the inserted bits must be shifted left into position.
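///
/// For illustration, storing a float into the second element of an alloca
/// promoted to <2 x float> becomes (names invented):
///
///   %v = insertelement <2 x float> %Old, float %SV, i32 1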
Value *ConvertToScalarInfo::
ConvertScalar_InsertValue(Value *SV, Value *Old,
                          uint64_t Offset, IRBuilder<> &Builder) {
  // Convert the stored type to the actual type, shift it left into position,
  // then 'or' it into place.
  const Type *AllocaType = Old->getType();
  LLVMContext &Context = Old->getContext();

  if (const VectorType *VTy = dyn_cast<VectorType>(AllocaType)) {
    uint64_t VecSize = TD.getTypeAllocSizeInBits(VTy);
    uint64_t ValSize = TD.getTypeAllocSizeInBits(SV->getType());

    // Changing the whole vector with memset or with an access of a different
    // vector type?
    if (ValSize == VecSize)
      return Builder.CreateBitCast(SV, AllocaType, "tmp");

    uint64_t EltSize = TD.getTypeAllocSizeInBits(VTy->getElementType());

    // Must be an element insertion.
    unsigned Elt = Offset/EltSize;

    if (SV->getType() != VTy->getElementType())
      SV = Builder.CreateBitCast(SV, VTy->getElementType(), "tmp");

    SV = Builder.CreateInsertElement(Old, SV,
                     ConstantInt::get(Type::getInt32Ty(SV->getContext()), Elt),
                                     "tmp");
    return SV;
  }

  // If SV is a first-class aggregate value, insert each value recursively.
  if (const StructType *ST = dyn_cast<StructType>(SV->getType())) {
    const StructLayout &Layout = *TD.getStructLayout(ST);
    for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
      Value *Elt = Builder.CreateExtractValue(SV, i, "tmp");
      Old = ConvertScalar_InsertValue(Elt, Old,
                                      Offset+Layout.getElementOffsetInBits(i),
                                      Builder);
    }
    return Old;
  }

  if (const ArrayType *AT = dyn_cast<ArrayType>(SV->getType())) {
    uint64_t EltSize = TD.getTypeAllocSizeInBits(AT->getElementType());
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      Value *Elt = Builder.CreateExtractValue(SV, i, "tmp");
      Old = ConvertScalar_InsertValue(Elt, Old, Offset+i*EltSize, Builder);
    }
    return Old;
  }

  // If SV is a float, convert it to the appropriate integer type.
  // If it is a pointer, do the same.
  unsigned SrcWidth = TD.getTypeSizeInBits(SV->getType());
  unsigned DestWidth = TD.getTypeSizeInBits(AllocaType);
  unsigned SrcStoreWidth = TD.getTypeStoreSizeInBits(SV->getType());
  unsigned DestStoreWidth = TD.getTypeStoreSizeInBits(AllocaType);
  if (SV->getType()->isFloatingPointTy() || SV->getType()->isVectorTy())
    SV = Builder.CreateBitCast(SV,
                            IntegerType::get(SV->getContext(),SrcWidth), "tmp");
  else if (SV->getType()->isPointerTy())
    SV = Builder.CreatePtrToInt(SV, TD.getIntPtrType(SV->getContext()), "tmp");

  // Zero extend or truncate the value if needed.
  if (SV->getType() != AllocaType) {
    if (SV->getType()->getPrimitiveSizeInBits() <
             AllocaType->getPrimitiveSizeInBits())
      SV = Builder.CreateZExt(SV, AllocaType, "tmp");
    else {
      // Truncation may be needed if storing more than the alloca can hold
      // (undefined behavior).
      SV = Builder.CreateTrunc(SV, AllocaType, "tmp");
      SrcWidth = DestWidth;
      SrcStoreWidth = DestStoreWidth;
    }
  }

  // If this is a big-endian system and the store is narrower than the
  // full alloca type, we need to do a shift to get the right bits.
  int ShAmt = 0;
  if (TD.isBigEndian()) {
    // On big-endian machines, the lowest bit is stored at the bit offset
    // from the pointer given by getTypeStoreSizeInBits.  This matters for
    // integers with a bitwidth that is not a multiple of 8.
    ShAmt = DestStoreWidth - SrcStoreWidth - Offset;
  } else {
    ShAmt = Offset;
  }

  // Note: a negative shift amount (which shl does not define) is handled
  // here with an lshr instead.  We do this to support (e.g.) stores off the
  // end of a structure where only some bits in the structure are set.
  APInt Mask(APInt::getLowBitsSet(DestWidth, SrcWidth));
  if (ShAmt > 0 && (unsigned)ShAmt < DestWidth) {
    SV = Builder.CreateShl(SV, ConstantInt::get(SV->getType(),
                           ShAmt), "tmp");
    Mask <<= ShAmt;
  } else if (ShAmt < 0 && (unsigned)-ShAmt < DestWidth) {
    SV = Builder.CreateLShr(SV, ConstantInt::get(SV->getType(),
                            -ShAmt), "tmp");
    Mask = Mask.lshr(-ShAmt);
  }

  // Mask out the bits we are about to insert from the old value, and or
  // in the new bits.
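  // For example, inserting an i8 at bit offset 8 into an i32 (little-endian)
  // masks the old value with ~0xFF00 and 'or's in the zero-extended, shifted
  // byte.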
  if (SrcWidth != DestWidth) {
    assert(DestWidth > SrcWidth);
    Old = Builder.CreateAnd(Old, ConstantInt::get(Context, ~Mask), "mask");
    SV = Builder.CreateOr(Old, SV, "ins");
  }
  return SV;
}


/// PointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable.  This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
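/// For example, a bitcast ConstantExpr of a "constant" GlobalVariable is
/// accepted here, but the same bitcast written as an Instruction is not.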
static bool PointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return PointsToConstantGlobal(CE->getOperand(0));
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer, and return false if
/// we see any stores or other unknown uses.  If we see pointer arithmetic,
/// keep track of whether it moves the pointer (with isOffset), but otherwise
/// traverse the uses.  If we see a memcpy/memmove that targets an unoffset
/// pointer to the alloca, and if the source pointer points to a constant
/// global, we can optimize this.
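///
/// The pattern this matches looks like (names invented for illustration):
///
///   %A = alloca %T
///   %P = bitcast %T* %A to i8*
///   call void @llvm.memcpy...(i8* %P, i8* bitcast (%T* @G to i8*), ...)
///
/// where @G is a constant global; reads of %A can then be served by @G.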
static bool isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
                                           bool isOffset) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    User *U = cast<Instruction>(*UI);

    if (LoadInst *LI = dyn_cast<LoadInst>(U))
      // Ignore non-volatile loads; they are always ok.
      if (!LI->isVolatile())
        continue;

    if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
      // If uses of the bitcast are ok, we are ok.
      if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, isOffset))
        return false;
      continue;
    }
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
      // If the GEP has all zero indices, it doesn't offset the pointer.  If it
      // doesn't, it does.
      if (!isOnlyCopiedFromConstantGlobal(GEP, TheCopy,
                                         isOffset || !GEP->hasAllZeroIndices()))
        return false;
      continue;
    }

    // If this isn't our memcpy/memmove, reject it as something we can't
    // handle.
    MemTransferInst *MI = dyn_cast<MemTransferInst>(U);
    if (MI == 0)
      return false;

    // If we already have seen a copy, reject the second one.
    if (TheCopy) return false;

    // If the pointer has been offset from the start of the alloca, we can't
    // safely handle this.
    if (isOffset) return false;

    // If the memintrinsic isn't using the alloca as the dest, reject it.
    if (UI.getOperandNo() != 0) return false;

    // If the source of the memcpy/move is not a constant global, reject it.
    if (!PointsToConstantGlobal(MI->getSource()))
      return false;

    // Otherwise, the transform is safe.  Remember the copy instruction.
    TheCopy = MI;
  }
  return true;
}

/// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is only
/// modified by a copy from a constant global.  If we can prove this, we can
/// replace any uses of the alloca with uses of the global directly.
MemTransferInst *SROA::isOnlyCopiedFromConstantGlobal(AllocaInst *AI) {
  MemTransferInst *TheCopy = 0;
  if (::isOnlyCopiedFromConstantGlobal(AI, TheCopy, false))
    return TheCopy;
  return 0;
}