ScalarReplAggregates.cpp revision ff1147072a0c9dbe91572bbbbf93031c6451bbae
//===- ScalarReplAggregates.cpp - Scalar Replacement of Aggregates --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation implements the well known scalar replacement of
// aggregates transformation.  This xform breaks up alloca instructions of
// aggregate type (structure or array) into individual alloca instructions for
// each member (if possible).  Then, if possible, it transforms the individual
// alloca instructions into nice clean scalar SSA form.
//
17// often interact, especially for C++ programs.  As such, iterating between
18// SRoA, then Mem2Reg until we run out of things to promote works well.
19//
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "scalarrepl"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

STATISTIC(NumReplaced,  "Number of allocas broken up");
STATISTIC(NumPromoted,  "Number of allocas promoted");
STATISTIC(NumConverted, "Number of aggregates converted to scalar");
STATISTIC(NumGlobals,   "Number of allocas copied from constant global");

namespace {
  struct SROA : public FunctionPass {
    static char ID; // Pass identification, replacement for typeid
    explicit SROA(signed T = -1) : FunctionPass(&ID) {
      if (T == -1)
        SRThreshold = 128;
      else
        SRThreshold = T;
    }

    bool runOnFunction(Function &F);

    bool performScalarRepl(Function &F);
    bool performPromotion(Function &F);

    // getAnalysisUsage - This pass requires dominator information, but it does
    // not alter the CFG, so say so.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      AU.addRequired<DominanceFrontier>();
      AU.setPreservesCFG();
    }

  private:
    TargetData *TD;

    /// AllocaInfo - When analyzing uses of an alloca instruction, this captures
    /// information about the uses.  All these fields are initialized to false
    /// and set to true when something is learned.
    struct AllocaInfo {
      /// isUnsafe - This is set to true if the alloca cannot be SROA'd.
      bool isUnsafe : 1;

      /// needsCleanup - This is set to true if there is some use of the alloca
      /// that requires cleanup.
      bool needsCleanup : 1;

      /// isMemCpySrc - This is true if this aggregate is memcpy'd from.
      bool isMemCpySrc : 1;

      /// isMemCpyDst - This is true if this aggregate is memcpy'd into.
      bool isMemCpyDst : 1;

      AllocaInfo()
        : isUnsafe(false), needsCleanup(false),
          isMemCpySrc(false), isMemCpyDst(false) {}
    };

    unsigned SRThreshold;

    void MarkUnsafe(AllocaInfo &I) { I.isUnsafe = true; }

    int isSafeAllocaToScalarRepl(AllocationInst *AI);

    void isSafeUseOfAllocation(Instruction *User, AllocationInst *AI,
                               AllocaInfo &Info);
    void isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI,
                          AllocaInfo &Info);
    void isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI,
                                        unsigned OpNo, AllocaInfo &Info);
    void isSafeUseOfBitCastedAllocation(BitCastInst *User, AllocationInst *AI,
                                        AllocaInfo &Info);

    void DoScalarReplacement(AllocationInst *AI,
                             std::vector<AllocationInst*> &WorkList);
    void CleanupGEP(GetElementPtrInst *GEP);
    void CleanupAllocaUsers(AllocationInst *AI);
    AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocationInst *Base);

    void RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
                                    SmallVector<AllocaInst*, 32> &NewElts);

    void RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
                                      AllocationInst *AI,
                                      SmallVector<AllocaInst*, 32> &NewElts);
    void RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocationInst *AI,
                                       SmallVector<AllocaInst*, 32> &NewElts);
    void RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
                                      SmallVector<AllocaInst*, 32> &NewElts);

    bool CanConvertToScalar(Value *V, bool &IsNotTrivial, const Type *&VecTy,
                            bool &SawVec, uint64_t Offset, unsigned AllocaSize);
    void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset);
    Value *ConvertScalar_ExtractValue(Value *NV, const Type *ToType,
                                      uint64_t Offset, IRBuilder<> &Builder);
    Value *ConvertScalar_InsertValue(Value *StoredVal, Value *ExistingVal,
                                     uint64_t Offset, IRBuilder<> &Builder);
    static Instruction *isOnlyCopiedFromConstantGlobal(AllocationInst *AI);
  };
}

char SROA::ID = 0;
static RegisterPass<SROA> X("scalarrepl", "Scalar Replacement of Aggregates");

// Public interface to the ScalarReplAggregates pass
FunctionPass *llvm::createScalarReplAggregatesPass(signed int Threshold) {
  return new SROA(Threshold);
}
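
// Typical usage (an illustrative sketch): schedule the pass through a
// PassManager, e.g. PM.add(createScalarReplAggregatesPass()), or run it with
// 'opt -scalarrepl', the name given in the RegisterPass entry above.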


bool SROA::runOnFunction(Function &F) {
  TD = getAnalysisIfAvailable<TargetData>();

  bool Changed = performPromotion(F);

  // FIXME: ScalarRepl currently depends on TargetData more than it
  // theoretically needs to. It should be refactored in order to support
  // target-independent IR. Until this is done, just skip the actual
  // scalar-replacement portion of this pass.
  if (!TD) return Changed;

  while (1) {
    bool LocalChange = performScalarRepl(F);
    if (!LocalChange) break;   // No need to repromote if no scalarrepl
    Changed = true;
    LocalChange = performPromotion(F);
    if (!LocalChange) break;   // No need to re-scalarrepl if no promotion
  }

  return Changed;
}


bool SROA::performPromotion(Function &F) {
  std::vector<AllocaInst*> Allocas;
  DominatorTree     &DT = getAnalysis<DominatorTree>();
  DominanceFrontier &DF = getAnalysis<DominanceFrontier>();

  BasicBlock &BB = F.getEntryBlock();  // Get the entry node for the function

  bool Changed = false;

  while (1) {
    Allocas.clear();

    // Find allocas that are safe to promote, by looking at all instructions in
    // the entry node
    for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(I))       // Is it an alloca?
        if (isAllocaPromotable(AI))
          Allocas.push_back(AI);

    if (Allocas.empty()) break;

    PromoteMemToReg(Allocas, DT, DF, F.getContext());
    NumPromoted += Allocas.size();
    Changed = true;
  }

  return Changed;
}

/// getNumSAElements - Return the number of elements in the specified struct or
/// array.
static uint64_t getNumSAElements(const Type *T) {
  if (const StructType *ST = dyn_cast<StructType>(T))
    return ST->getNumElements();
  return cast<ArrayType>(T)->getNumElements();
}

// performScalarRepl - This algorithm is a simple worklist driven algorithm,
// which runs on all of the malloc/alloca instructions in the function, removing
// them if they are only used by getelementptr instructions.
//
bool SROA::performScalarRepl(Function &F) {
  std::vector<AllocationInst*> WorkList;

  // Scan the entry basic block, adding any alloca's and mallocs to the worklist
  BasicBlock &BB = F.getEntryBlock();
  for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
    if (AllocationInst *A = dyn_cast<AllocationInst>(I))
      WorkList.push_back(A);

  // Process the worklist
  bool Changed = false;
  while (!WorkList.empty()) {
    AllocationInst *AI = WorkList.back();
    WorkList.pop_back();

    // Handle dead allocas trivially.  These can be formed by SROA'ing arrays
    // with unused elements.
    if (AI->use_empty()) {
      AI->eraseFromParent();
      continue;
    }

    // If this alloca is impossible for us to promote, reject it early.
    if (AI->isArrayAllocation() || !AI->getAllocatedType()->isSized())
      continue;

    // Check to see if this allocation is only modified by a memcpy/memmove from
    // a constant global.  If this is the case, we can change all users to use
    // the constant global instead.  This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
    // is only subsequently read.
    if (Instruction *TheCopy = isOnlyCopiedFromConstantGlobal(AI)) {
      DEBUG(errs() << "Found alloca equal to global: " << *AI);
      DEBUG(errs() << "  memcpy = " << *TheCopy);
      Constant *TheSrc = cast<Constant>(TheCopy->getOperand(2));
      AI->replaceAllUsesWith(ConstantExpr::getBitCast(TheSrc, AI->getType()));
      TheCopy->eraseFromParent();  // Don't mutate the global.
      AI->eraseFromParent();
      ++NumGlobals;
      Changed = true;
      continue;
    }

    // Check to see if we can perform the core SROA transformation.  We cannot
    // transform the allocation instruction if it is an array allocation
    // (allocations OF arrays are ok though), and an allocation of a scalar
    // value cannot be decomposed at all.
    uint64_t AllocaSize = TD->getTypeAllocSize(AI->getAllocatedType());

    // Do not promote [0 x %struct].
    if (AllocaSize == 0) continue;

    // Do not promote any struct whose size is too big.
    if (AllocaSize > SRThreshold) continue;

    if ((isa<StructType>(AI->getAllocatedType()) ||
         isa<ArrayType>(AI->getAllocatedType())) &&
        // Do not promote any aggregate into more than SRThreshold/4 (32 by
        // default) separate vars.
        getNumSAElements(AI->getAllocatedType()) <= SRThreshold/4) {
      // Check that all of the users of the allocation are capable of being
      // transformed.
      switch (isSafeAllocaToScalarRepl(AI)) {
      default: llvm_unreachable("Unexpected value!");
      case 0:  // Not safe to scalar replace.
        break;
      case 1:  // Safe, but requires cleanup/canonicalizations first
        CleanupAllocaUsers(AI);
        // FALL THROUGH.
      case 3:  // Safe to scalar replace.
        DoScalarReplacement(AI, WorkList);
        Changed = true;
        continue;
      }
    }

    // See if we can turn this aggregate value (potentially with casts) into a
    // simple scalar value that can be mem2reg'd into a register value.
    // IsNotTrivial tracks whether this is something that mem2reg could have
    // promoted itself.  If so, we don't want to transform it needlessly.  Note
    // that we can't just check based on the type: the alloca may be of an i32
    // but that has pointer arithmetic to set byte 3 of it or something.
    bool IsNotTrivial = false;
    const Type *VectorTy = 0;
    bool HadAVector = false;
    if (CanConvertToScalar(AI, IsNotTrivial, VectorTy, HadAVector,
                           0, unsigned(AllocaSize)) && IsNotTrivial) {
      AllocaInst *NewAI;
      // If we were able to find a vector type that can handle this with
      // insert/extract elements, and if there was at least one use that had
      // a vector type, promote this to a vector.  We don't want to promote
      // random stuff that doesn't use vectors (e.g. <9 x double>) because then
      // we just get a lot of insert/extracts.  If at least one vector is
      // involved, then we probably really do have a union of vector/array.
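      // (An illustrative case: an alloca accessed both as <4 x float> and as
      // individual float elements is such a union.)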
      if (VectorTy && isa<VectorType>(VectorTy) && HadAVector) {
        DEBUG(errs() << "CONVERT TO VECTOR: " << *AI << "  TYPE = "
                     << *VectorTy << '\n');

        // Create and insert the vector alloca.
        NewAI = new AllocaInst(VectorTy, 0, "", AI->getParent()->begin());
        ConvertUsesToScalar(AI, NewAI, 0);
      } else {
        DEBUG(errs() << "CONVERT TO SCALAR INTEGER: " << *AI << "\n");

        // Create and insert the integer alloca.
        const Type *NewTy = IntegerType::get(AI->getContext(), AllocaSize*8);
        NewAI = new AllocaInst(NewTy, 0, "", AI->getParent()->begin());
        ConvertUsesToScalar(AI, NewAI, 0);
      }
      NewAI->takeName(AI);
      AI->eraseFromParent();
      ++NumConverted;
      Changed = true;
      continue;
    }

    // Otherwise, couldn't process this alloca.
  }

  return Changed;
}

/// DoScalarReplacement - This alloca satisfied the isSafeAllocaToScalarRepl
/// predicate; do SROA now.
void SROA::DoScalarReplacement(AllocationInst *AI,
                               std::vector<AllocationInst*> &WorkList) {
  DEBUG(errs() << "Found inst to SROA: " << *AI << '\n');
  SmallVector<AllocaInst*, 32> ElementAllocas;
  if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
    ElementAllocas.reserve(ST->getNumContainedTypes());
    for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0,
                                      AI->getAlignment(),
                                      AI->getName() + "." + Twine(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  } else {
    const ArrayType *AT = cast<ArrayType>(AI->getAllocatedType());
    ElementAllocas.reserve(AT->getNumElements());
    const Type *ElTy = AT->getElementType();
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(),
                                      AI->getName() + "." + Twine(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  }

  // Now that we have created the alloca instructions that we want to use,
  // expand the getelementptr instructions to use them.
  //
  while (!AI->use_empty()) {
    Instruction *User = cast<Instruction>(AI->use_back());
    if (BitCastInst *BCInst = dyn_cast<BitCastInst>(User)) {
      RewriteBitCastUserOfAlloca(BCInst, AI, ElementAllocas);
      BCInst->eraseFromParent();
      continue;
    }

    // Replace:
    //   %res = load { i32, i32 }* %alloc
    // with:
    //   %load.0 = load i32* %alloc.0
    //   %insert.0 = insertvalue { i32, i32 } undef, i32 %load.0, 0
    //   %load.1 = load i32* %alloc.1
    //   %insert = insertvalue { i32, i32 } %insert.0, i32 %load.1, 1
    // (Also works for arrays instead of structs)
    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      Value *Insert = UndefValue::get(LI->getType());
      for (unsigned i = 0, e = ElementAllocas.size(); i != e; ++i) {
        Value *Load = new LoadInst(ElementAllocas[i], "load", LI);
        Insert = InsertValueInst::Create(Insert, Load, i, "insert", LI);
      }
      LI->replaceAllUsesWith(Insert);
      LI->eraseFromParent();
      continue;
    }

    // Replace:
    //   store { i32, i32 } %val, { i32, i32 }* %alloc
    // with:
    //   %val.0 = extractvalue { i32, i32 } %val, 0
    //   store i32 %val.0, i32* %alloc.0
    //   %val.1 = extractvalue { i32, i32 } %val, 1
    //   store i32 %val.1, i32* %alloc.1
    // (Also works for arrays instead of structs)
    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      Value *Val = SI->getOperand(0);
      for (unsigned i = 0, e = ElementAllocas.size(); i != e; ++i) {
        Value *Extract = ExtractValueInst::Create(Val, i, Val->getName(), SI);
        new StoreInst(Extract, ElementAllocas[i], SI);
      }
      SI->eraseFromParent();
      continue;
    }

    GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User);
    // We now know that the GEP is of the form: GEP <ptr>, 0, <cst>
    unsigned Idx =
       (unsigned)cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();

    assert(Idx < ElementAllocas.size() && "Index out of range?");
    AllocaInst *AllocaToUse = ElementAllocas[Idx];

    Value *RepValue;
    if (GEPI->getNumOperands() == 3) {
      // Do not insert a new getelementptr instruction with zero indices, only
      // to have it optimized out later.
      RepValue = AllocaToUse;
    } else {
      // We are indexing deeply into the structure, so we still need a
      // getelementptr instruction to finish the indexing.  This may be
      // expanded itself once the worklist is rerun.
      //
      SmallVector<Value*, 8> NewArgs;
      NewArgs.push_back(Constant::getNullValue(
                                           Type::getInt32Ty(AI->getContext())));
      NewArgs.append(GEPI->op_begin()+3, GEPI->op_end());
      RepValue = GetElementPtrInst::Create(AllocaToUse, NewArgs.begin(),
                                           NewArgs.end(), "", GEPI);
      RepValue->takeName(GEPI);
    }

    // If this GEP is to the start of the aggregate, check for memcpys.
    if (Idx == 0 && GEPI->hasAllZeroIndices())
      RewriteBitCastUserOfAlloca(GEPI, AI, ElementAllocas);

    // Move all of the users over to the new GEP.
    GEPI->replaceAllUsesWith(RepValue);
    // Delete the old GEP
    GEPI->eraseFromParent();
  }

  // Finally, delete the Alloca instruction
  AI->eraseFromParent();
  NumReplaced++;
}


/// isSafeElementUse - Check to see if this use is an allowed use for a
/// getelementptr instruction of an array aggregate allocation.  isFirstElt
/// indicates whether Ptr is known to point to the start of the aggregate.
///
void SROA::isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI,
                            AllocaInfo &Info) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I) {
    Instruction *User = cast<Instruction>(*I);
    switch (User->getOpcode()) {
    case Instruction::Load:  break;
    case Instruction::Store:
      // Store is ok if storing INTO the pointer, not storing the pointer
      if (User->getOperand(0) == Ptr) return MarkUnsafe(Info);
      break;
    case Instruction::GetElementPtr: {
      GetElementPtrInst *GEP = cast<GetElementPtrInst>(User);
      bool AreAllZeroIndices = isFirstElt;
      if (GEP->getNumOperands() > 1) {
        if (!isa<ConstantInt>(GEP->getOperand(1)) ||
            !cast<ConstantInt>(GEP->getOperand(1))->isZero())
          // Using pointer arithmetic to navigate the array.
          return MarkUnsafe(Info);

        if (AreAllZeroIndices)
          AreAllZeroIndices = GEP->hasAllZeroIndices();
      }
      isSafeElementUse(GEP, AreAllZeroIndices, AI, Info);
      if (Info.isUnsafe) return;
      break;
    }
    case Instruction::BitCast:
      if (isFirstElt) {
        isSafeUseOfBitCastedAllocation(cast<BitCastInst>(User), AI, Info);
        if (Info.isUnsafe) return;
        break;
      }
      DEBUG(errs() << "  Transformation preventing inst: " << *User);
      return MarkUnsafe(Info);
    case Instruction::Call:
      if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
        if (isFirstElt) {
          isSafeMemIntrinsicOnAllocation(MI, AI, I.getOperandNo(), Info);
          if (Info.isUnsafe) return;
          break;
        }
      }
      DEBUG(errs() << "  Transformation preventing inst: " << *User);
      return MarkUnsafe(Info);
    default:
      DEBUG(errs() << "  Transformation preventing inst: " << *User);
      return MarkUnsafe(Info);
    }
  }
  return;  // All users look ok :)
}

/// AllUsersAreLoads - Return true if all users of this value are loads.
static bool AllUsersAreLoads(Value *Ptr) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I)
    if (cast<Instruction>(*I)->getOpcode() != Instruction::Load)
      return false;
  return true;
}

/// isSafeUseOfAllocation - Check to see if this user is an allowed use for an
/// aggregate allocation.
///
void SROA::isSafeUseOfAllocation(Instruction *User, AllocationInst *AI,
                                 AllocaInfo &Info) {
  if (BitCastInst *C = dyn_cast<BitCastInst>(User))
    return isSafeUseOfBitCastedAllocation(C, AI, Info);

  if (LoadInst *LI = dyn_cast<LoadInst>(User))
    if (!LI->isVolatile())
      return; // Loads (returning a first-class aggregate) are always rewritable

  if (StoreInst *SI = dyn_cast<StoreInst>(User))
    if (!SI->isVolatile() && SI->getOperand(0) != AI)
      return; // Store is ok if storing INTO the pointer, not storing the pointer

  GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User);
  if (GEPI == 0)
    return MarkUnsafe(Info);

  gep_type_iterator I = gep_type_begin(GEPI), E = gep_type_end(GEPI);

  // The GEP is not safe to transform if not of the form "GEP <ptr>, 0, <cst>".
  if (I == E ||
      I.getOperand() != Constant::getNullValue(I.getOperand()->getType())) {
    return MarkUnsafe(Info);
  }

  ++I;
  if (I == E) return MarkUnsafe(Info);  // ran out of GEP indices??

  bool IsAllZeroIndices = true;

  // If the first index is a non-constant index into an array, see if we can
  // handle it as a special case.
  if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
    if (!isa<ConstantInt>(I.getOperand())) {
      IsAllZeroIndices = false;
      uint64_t NumElements = AT->getNumElements();

      // If this is an array index and the index is not constant, we cannot
      // promote... that is unless the array has exactly one or two elements in
      // it, in which case we CAN promote it, but we have to canonicalize this
      // out if this is the only problem.
      if ((NumElements == 1 || NumElements == 2) &&
          AllUsersAreLoads(GEPI)) {
        Info.needsCleanup = true;
        return;  // Canonicalization required!
      }
      return MarkUnsafe(Info);
    }
  }

  // Walk through the GEP type indices, checking the types that this indexes
  // into.
  for (; I != E; ++I) {
    // Ignore struct elements, no extra checking needed for these.
    if (isa<StructType>(*I))
      continue;

    ConstantInt *IdxVal = dyn_cast<ConstantInt>(I.getOperand());
    if (!IdxVal) return MarkUnsafe(Info);

    // Are all indices still zero?
    IsAllZeroIndices &= IdxVal->isZero();

    if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
      // This GEP indexes an array.  Verify that this is an in-range constant
      // integer. Specifically, consider A[0][i]. We cannot know that the user
      // isn't doing invalid things like allowing i to index an out-of-range
      // subscript that accesses A[1].  Because of this, we have to reject SROA
      // of any accesses into structs where any of the components are variables.
      if (IdxVal->getZExtValue() >= AT->getNumElements())
        return MarkUnsafe(Info);
    } else if (const VectorType *VT = dyn_cast<VectorType>(*I)) {
      if (IdxVal->getZExtValue() >= VT->getNumElements())
        return MarkUnsafe(Info);
    }
  }

  // If there are any non-simple uses of this getelementptr, make sure to reject
  // them.
  return isSafeElementUse(GEPI, IsAllZeroIndices, AI, Info);
}

/// isSafeMemIntrinsicOnAllocation - Check whether the specified memory
/// intrinsic can be promoted by SROA, marking Info unsafe if not.  At this
/// point, we know that the operand of the memintrinsic is a pointer to the
/// beginning of the allocation.
void SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI,
                                          unsigned OpNo, AllocaInfo &Info) {
  // If not constant length, give up.
  ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
  if (!Length) return MarkUnsafe(Info);

  // If not the whole aggregate, give up.
  if (Length->getZExtValue() !=
      TD->getTypeAllocSize(AI->getType()->getElementType()))
    return MarkUnsafe(Info);

  // We only know about memcpy/memset/memmove.
  if (!isa<MemIntrinsic>(MI))
    return MarkUnsafe(Info);

  // Otherwise, we can transform it.  Determine whether this is a memcpy/set
  // into or out of the aggregate.
  if (OpNo == 1)
    Info.isMemCpyDst = true;
  else {
    assert(OpNo == 2);
    Info.isMemCpySrc = true;
  }
}

/// isSafeUseOfBitCastedAllocation - Check whether all users of this bitcast
/// are safe uses of the allocation, marking Info unsafe if any is not.
void SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocationInst *AI,
                                          AllocaInfo &Info) {
  for (Value::use_iterator UI = BC->use_begin(), E = BC->use_end();
       UI != E; ++UI) {
    if (BitCastInst *BCU = dyn_cast<BitCastInst>(UI)) {
      isSafeUseOfBitCastedAllocation(BCU, AI, Info);
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(UI)) {
      isSafeMemIntrinsicOnAllocation(MI, AI, UI.getOperandNo(), Info);
    } else if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
      if (SI->isVolatile())
        return MarkUnsafe(Info);

      // If storing the entire alloca in one chunk through a bitcasted pointer
      // to integer, we can transform it.  This happens (for example) when you
      // cast a {i32,i32}* to i64* and store through it.  This is similar to the
      // memcpy case and occurs in various "byval" cases and emulated memcpys.
      if (isa<IntegerType>(SI->getOperand(0)->getType()) &&
          TD->getTypeAllocSize(SI->getOperand(0)->getType()) ==
          TD->getTypeAllocSize(AI->getType()->getElementType())) {
        Info.isMemCpyDst = true;
        continue;
      }
      return MarkUnsafe(Info);
    } else if (LoadInst *LI = dyn_cast<LoadInst>(UI)) {
      if (LI->isVolatile())
        return MarkUnsafe(Info);

      // If loading the entire alloca in one chunk through a bitcasted pointer
      // to integer, we can transform it.  This happens (for example) when you
      // cast a {i32,i32}* to i64* and load through it.  This is similar to the
      // memcpy case and occurs in various "byval" cases and emulated memcpys.
      if (isa<IntegerType>(LI->getType()) &&
          TD->getTypeAllocSize(LI->getType()) ==
          TD->getTypeAllocSize(AI->getType()->getElementType())) {
        Info.isMemCpySrc = true;
        continue;
      }
      return MarkUnsafe(Info);
    } else if (isa<DbgInfoIntrinsic>(UI)) {
      // If one user is DbgInfoIntrinsic then check if all users are
      // DbgInfoIntrinsics.
      if (OnlyUsedByDbgInfoIntrinsics(BC)) {
        Info.needsCleanup = true;
        return;
      } else
        MarkUnsafe(Info);
    } else {
      return MarkUnsafe(Info);
    }
    if (Info.isUnsafe) return;
  }
}

/// RewriteBitCastUserOfAlloca - BCInst (transitively) bitcasts AI, or indexes
/// to its first element.  Transform users of the cast to use the new values
/// instead.
void SROA::RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
                                      SmallVector<AllocaInst*, 32> &NewElts) {
  Value::use_iterator UI = BCInst->use_begin(), UE = BCInst->use_end();
  while (UI != UE) {
    Instruction *User = cast<Instruction>(*UI++);
    if (BitCastInst *BCU = dyn_cast<BitCastInst>(User)) {
      RewriteBitCastUserOfAlloca(BCU, AI, NewElts);
      if (BCU->use_empty()) BCU->eraseFromParent();
      continue;
    }

    if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
      // This must be memcpy/memmove/memset of the entire aggregate.
      // Split into one per element.
      RewriteMemIntrinUserOfAlloca(MI, BCInst, AI, NewElts);
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // If this is a store of the entire alloca from an integer, rewrite it.
      RewriteStoreUserOfWholeAlloca(SI, AI, NewElts);
      continue;
    }

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // If this is a load of the entire alloca to an integer, rewrite it.
      RewriteLoadUserOfWholeAlloca(LI, AI, NewElts);
      continue;
    }

    // Otherwise it must be some other user of a gep of the first pointer.  Just
    // leave these alone.
    continue;
  }
}

/// RewriteMemIntrinUserOfAlloca - MI is a memcpy/memset/memmove from or to AI.
/// Rewrite it to copy or set the elements of the scalarized memory.
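/// For example (an illustrative sketch), a memcpy of 8 bytes covering a
/// {i32,i32} alloca becomes a separate 4-byte copy -- or a direct load/store
/// pair, for single-value element types -- for each of the two element allocas.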
void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
                                        AllocationInst *AI,
                                        SmallVector<AllocaInst*, 32> &NewElts) {

  // If this is a memcpy/memmove, construct the other pointer as the
  // appropriate type.  The "Other" pointer is the pointer that goes to memory
  // that doesn't have anything to do with the alloca that we are promoting. For
  // memset, this Value* stays null.
  Value *OtherPtr = 0;
  LLVMContext &Context = MI->getContext();
  unsigned MemAlignment = MI->getAlignment();
  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { // memmove/memcpy
    if (BCInst == MTI->getRawDest())
      OtherPtr = MTI->getRawSource();
    else {
      assert(BCInst == MTI->getRawSource());
      OtherPtr = MTI->getRawDest();
    }
  }

  // If there is another pointer, we want to convert it to the same pointer
  // type as AI has, so we can GEP through it safely.
  if (OtherPtr) {
    // It is likely that OtherPtr is a bitcast, if so, remove it.
    if (BitCastInst *BC = dyn_cast<BitCastInst>(OtherPtr))
      OtherPtr = BC->getOperand(0);
    // All zero GEPs are effectively bitcasts.
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(OtherPtr))
      if (GEP->hasAllZeroIndices())
        OtherPtr = GEP->getOperand(0);

    if (ConstantExpr *BCE = dyn_cast<ConstantExpr>(OtherPtr))
      if (BCE->getOpcode() == Instruction::BitCast)
        OtherPtr = BCE->getOperand(0);

    // If the pointer is not the right type, insert a bitcast to the right
    // type.
    if (OtherPtr->getType() != AI->getType())
      OtherPtr = new BitCastInst(OtherPtr, AI->getType(), OtherPtr->getName(),
                                 MI);
  }

  // Process each element of the aggregate.
  Value *TheFn = MI->getOperand(0);
  const Type *BytePtrTy = MI->getRawDest()->getType();
  bool SROADest = MI->getRawDest() == BCInst;

  Constant *Zero = Constant::getNullValue(Type::getInt32Ty(MI->getContext()));

  for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
    // If this is a memcpy/memmove, emit a GEP of the other element address.
    Value *OtherElt = 0;
    unsigned OtherEltAlign = MemAlignment;

    if (OtherPtr) {
      Value *Idx[2] = { Zero,
                      ConstantInt::get(Type::getInt32Ty(MI->getContext()), i) };
      OtherElt = GetElementPtrInst::Create(OtherPtr, Idx, Idx + 2,
                                           OtherPtr->getNameStr()+"."+Twine(i),
                                           MI);
      uint64_t EltOffset;
      const PointerType *OtherPtrTy = cast<PointerType>(OtherPtr->getType());
      if (const StructType *ST =
            dyn_cast<StructType>(OtherPtrTy->getElementType())) {
        EltOffset = TD->getStructLayout(ST)->getElementOffset(i);
      } else {
        const Type *EltTy =
          cast<SequentialType>(OtherPtr->getType())->getElementType();
        EltOffset = TD->getTypeAllocSize(EltTy)*i;
      }

      // The alignment of the other pointer is the guaranteed alignment of the
      // element, which is affected by both the known alignment of the whole
      // mem intrinsic and the alignment of the element.  If the alignment of
      // the memcpy (for example) is 32 but the element is at a 4-byte offset,
      // then the known alignment is just 4 bytes.
      OtherEltAlign = (unsigned)MinAlign(OtherEltAlign, EltOffset);
    }

    Value *EltPtr = NewElts[i];
    const Type *EltTy = cast<PointerType>(EltPtr->getType())->getElementType();

    // If we got down to a scalar, insert a load or store as appropriate.
    if (EltTy->isSingleValueType()) {
      if (isa<MemTransferInst>(MI)) {
        if (SROADest) {
          // From Other to Alloca.
          Value *Elt = new LoadInst(OtherElt, "tmp", false, OtherEltAlign, MI);
          new StoreInst(Elt, EltPtr, MI);
        } else {
          // From Alloca to Other.
          Value *Elt = new LoadInst(EltPtr, "tmp", MI);
          new StoreInst(Elt, OtherElt, false, OtherEltAlign, MI);
        }
        continue;
      }
      assert(isa<MemSetInst>(MI));

      // If the stored element is zero (common case), just store a null
      // constant.
      Constant *StoreVal;
      if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getOperand(2))) {
        if (CI->isZero()) {
          StoreVal = Constant::getNullValue(EltTy);  // 0.0, null, 0, <0,0>
        } else {
          // If EltTy is a vector type, get the element type.
          const Type *ValTy = EltTy->getScalarType();

          // Construct an integer with the right value.
          unsigned EltSize = TD->getTypeSizeInBits(ValTy);
          APInt OneVal(EltSize, CI->getZExtValue());
          APInt TotalVal(OneVal);
          // Set each byte.
          for (unsigned i = 0; 8*i < EltSize; ++i) {
            TotalVal = TotalVal.shl(8);
            TotalVal |= OneVal;
          }

          // Convert the integer value to the appropriate type.
          StoreVal = ConstantInt::get(Context, TotalVal);
          if (isa<PointerType>(ValTy))
            StoreVal = ConstantExpr::getIntToPtr(StoreVal, ValTy);
          else if (ValTy->isFloatingPoint())
            StoreVal = ConstantExpr::getBitCast(StoreVal, ValTy);
          assert(StoreVal->getType() == ValTy && "Type mismatch!");
          // If the requested value was a vector constant, create it.
          if (EltTy != ValTy) {
            unsigned NumElts = cast<VectorType>(EltTy)->getNumElements();
            SmallVector<Constant*, 16> Elts(NumElts, StoreVal);
            StoreVal = ConstantVector::get(&Elts[0], NumElts);
          }
        }
        new StoreInst(StoreVal, EltPtr, MI);
        continue;
      }
      // Otherwise, if we're storing a byte variable, use a memset call for
      // this element.
    }

    // Cast the element pointer to BytePtrTy.
    if (EltPtr->getType() != BytePtrTy)
      EltPtr = new BitCastInst(EltPtr, BytePtrTy, EltPtr->getNameStr(), MI);

    // Cast the other pointer (if we have one) to BytePtrTy.
    if (OtherElt && OtherElt->getType() != BytePtrTy)
      OtherElt = new BitCastInst(OtherElt, BytePtrTy, OtherElt->getNameStr(),
                                 MI);

    unsigned EltSize = TD->getTypeAllocSize(EltTy);

    // Finally, insert the meminst for this element.
    if (isa<MemTransferInst>(MI)) {
      Value *Ops[] = {
        SROADest ? EltPtr : OtherElt,  // Dest ptr
        SROADest ? OtherElt : EltPtr,  // Src ptr
        ConstantInt::get(MI->getOperand(3)->getType(), EltSize), // Size
        // Align
        ConstantInt::get(Type::getInt32Ty(MI->getContext()), OtherEltAlign)
      };
      CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
    } else {
      assert(isa<MemSetInst>(MI));
      Value *Ops[] = {
        EltPtr, MI->getOperand(2),  // Dest, Value,
        ConstantInt::get(MI->getOperand(3)->getType(), EltSize), // Size
        Zero  // Align
      };
      CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
    }
  }
  MI->eraseFromParent();
}

/// RewriteStoreUserOfWholeAlloca - We found a store of an integer that
/// overwrites the entire allocation.  Extract out the pieces of the stored
/// integer and store them individually.
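/// For example (an illustrative little-endian sketch), a store of i64 %V over
/// a {i32,i32} alloca becomes roughly:
///   store (trunc i64 %V to i32) to element 0
///   store (trunc (lshr i64 %V, 32) to i32) to element 1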
void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,
                                         AllocationInst *AI,
                                         SmallVector<AllocaInst*, 32> &NewElts){
  // Extract each element out of the integer according to its structure offset
  // and store the element value to the individual alloca.
  Value *SrcVal = SI->getOperand(0);
  const Type *AllocaEltTy = AI->getType()->getElementType();
  uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);

  // If this isn't a store of an integer to the whole alloca, it may be a store
  // to the first element.  Just ignore the store in this case and normal SROA
  // will handle it.
  if (!isa<IntegerType>(SrcVal->getType()) ||
      TD->getTypeAllocSizeInBits(SrcVal->getType()) != AllocaSizeBits)
    return;
  // Handle tail padding by extending the operand
  if (TD->getTypeSizeInBits(SrcVal->getType()) != AllocaSizeBits)
    SrcVal = new ZExtInst(SrcVal,
                          IntegerType::get(SI->getContext(), AllocaSizeBits),
                          "", SI);

  DEBUG(errs() << "PROMOTING STORE TO WHOLE ALLOCA: " << *AI << *SI);

  // There are two forms here: AI could be an array or struct.  Both cases
  // have different ways to compute the element offset.
  if (const StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
    const StructLayout *Layout = TD->getStructLayout(EltSTy);

    for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
      // Get the number of bits to shift SrcVal to get the value.
      const Type *FieldTy = EltSTy->getElementType(i);
      uint64_t Shift = Layout->getElementOffsetInBits(i);

      if (TD->isBigEndian())
        Shift = AllocaSizeBits-Shift-TD->getTypeAllocSizeInBits(FieldTy);

      Value *EltVal = SrcVal;
      if (Shift) {
        Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift);
        EltVal = BinaryOperator::CreateLShr(EltVal, ShiftVal,
                                            "sroa.store.elt", SI);
      }

      // Truncate down to an integer of the right size.
      uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);

      // Ignore zero sized fields like {}, they obviously contain no data.
      if (FieldSizeBits == 0) continue;

      if (FieldSizeBits != AllocaSizeBits)
        EltVal = new TruncInst(EltVal,
                               IntegerType::get(SI->getContext(), FieldSizeBits),
                               "", SI);
      Value *DestField = NewElts[i];
      if (EltVal->getType() == FieldTy) {
        // Storing to an integer field of this size, just do it.
      } else if (FieldTy->isFloatingPoint() || isa<VectorType>(FieldTy)) {
        // Bitcast to the right element type (for fp/vector values).
        EltVal = new BitCastInst(EltVal, FieldTy, "", SI);
      } else {
        // Otherwise, bitcast the dest pointer (for aggregates).
        DestField = new BitCastInst(DestField,
                                    PointerType::getUnqual(EltVal->getType()),
                                    "", SI);
      }
      new StoreInst(EltVal, DestField, SI);
    }

  } else {
    const ArrayType *ATy = cast<ArrayType>(AllocaEltTy);
    const Type *ArrayEltTy = ATy->getElementType();
    uint64_t ElementOffset = TD->getTypeAllocSizeInBits(ArrayEltTy);
    uint64_t ElementSizeBits = TD->getTypeSizeInBits(ArrayEltTy);

    uint64_t Shift;

    if (TD->isBigEndian())
      Shift = AllocaSizeBits-ElementOffset;
    else
      Shift = 0;

    for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
      // Ignore zero sized fields like {}, they obviously contain no data.
      if (ElementSizeBits == 0) continue;

      Value *EltVal = SrcVal;
      if (Shift) {
        Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift);
        EltVal = BinaryOperator::CreateLShr(EltVal, ShiftVal,
                                            "sroa.store.elt", SI);
      }

      // Truncate down to an integer of the right size.
      if (ElementSizeBits != AllocaSizeBits)
        EltVal = new TruncInst(EltVal,
                               IntegerType::get(SI->getContext(),
                                                ElementSizeBits), "", SI);
      Value *DestField = NewElts[i];
      if (EltVal->getType() == ArrayEltTy) {
        // Storing to an integer field of this size, just do it.
      } else if (ArrayEltTy->isFloatingPoint() || isa<VectorType>(ArrayEltTy)) {
        // Bitcast to the right element type (for fp/vector values).
        EltVal = new BitCastInst(EltVal, ArrayEltTy, "", SI);
      } else {
        // Otherwise, bitcast the dest pointer (for aggregates).
        DestField = new BitCastInst(DestField,
                                    PointerType::getUnqual(EltVal->getType()),
                                    "", SI);
      }
      new StoreInst(EltVal, DestField, SI);

      if (TD->isBigEndian())
        Shift -= ElementOffset;
      else
        Shift += ElementOffset;
    }
  }

  SI->eraseFromParent();
}

/// RewriteLoadUserOfWholeAlloca - We found a load of the entire allocation to
/// an integer.  Load the individual pieces to form the aggregate value.
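/// This is the inverse of the store case above (again an illustrative
/// little-endian sketch): each element is loaded, zero-extended to the alloca
/// width, shifted into its bit position, and OR'd into the result.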
void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
                                        SmallVector<AllocaInst*, 32> &NewElts) {
  // Extract each element out of the NewElts according to its structure offset
  // and form the result value.
  const Type *AllocaEltTy = AI->getType()->getElementType();
  uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);

  // If this isn't a load of the whole alloca to an integer, it may be a load
  // of the first element.  Just ignore the load in this case and normal SROA
  // will handle it.
  if (!isa<IntegerType>(LI->getType()) ||
      TD->getTypeAllocSizeInBits(LI->getType()) != AllocaSizeBits)
    return;

  DEBUG(errs() << "PROMOTING LOAD OF WHOLE ALLOCA: " << *AI << *LI);

  // There are two forms here: AI could be an array or struct.  Both cases
  // have different ways to compute the element offset.
  const StructLayout *Layout = 0;
  uint64_t ArrayEltBitOffset = 0;
  if (const StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
    Layout = TD->getStructLayout(EltSTy);
  } else {
    const Type *ArrayEltTy = cast<ArrayType>(AllocaEltTy)->getElementType();
    ArrayEltBitOffset = TD->getTypeAllocSizeInBits(ArrayEltTy);
  }

  Value *ResultVal =
    Constant::getNullValue(IntegerType::get(LI->getContext(), AllocaSizeBits));

  for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
    // Load the value from the alloca.  If the NewElt is an aggregate, cast
    // the pointer to an integer of the same size before doing the load.
    Value *SrcField = NewElts[i];
    const Type *FieldTy =
      cast<PointerType>(SrcField->getType())->getElementType();
    uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);

    // Ignore zero sized fields like {}, they obviously contain no data.
    if (FieldSizeBits == 0) continue;

    const IntegerType *FieldIntTy = IntegerType::get(LI->getContext(),
                                                     FieldSizeBits);
    if (!isa<IntegerType>(FieldTy) && !FieldTy->isFloatingPoint() &&
        !isa<VectorType>(FieldTy))
      SrcField = new BitCastInst(SrcField,
                                 PointerType::getUnqual(FieldIntTy),
                                 "", LI);
    SrcField = new LoadInst(SrcField, "sroa.load.elt", LI);

    // If SrcField is a fp or vector of the right size but that isn't an
    // integer type, bitcast to an integer so we can shift it.
    if (SrcField->getType() != FieldIntTy)
      SrcField = new BitCastInst(SrcField, FieldIntTy, "", LI);

    // Zero extend the field to be the same size as the final alloca so that
    // we can shift and insert it.
    if (SrcField->getType() != ResultVal->getType())
      SrcField = new ZExtInst(SrcField, ResultVal->getType(), "", LI);

    // Determine the number of bits to shift SrcField.
    uint64_t Shift;
    if (Layout) // Struct case.
      Shift = Layout->getElementOffsetInBits(i);
    else  // Array case.
      Shift = i*ArrayEltBitOffset;

    if (TD->isBigEndian())
      Shift = AllocaSizeBits-Shift-FieldIntTy->getBitWidth();

    if (Shift) {
      Value *ShiftVal = ConstantInt::get(SrcField->getType(), Shift);
      SrcField = BinaryOperator::CreateShl(SrcField, ShiftVal, "", LI);
    }

    ResultVal = BinaryOperator::CreateOr(SrcField, ResultVal, "", LI);
  }

  // Handle tail padding by truncating the result
  if (TD->getTypeSizeInBits(LI->getType()) != AllocaSizeBits)
    ResultVal = new TruncInst(ResultVal, LI->getType(), "", LI);

  LI->replaceAllUsesWith(ResultVal);
  LI->eraseFromParent();
}


/// HasPadding - Return true if the specified type has any structure or
/// alignment padding, false otherwise.
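/// For example, a struct like {i8, i32} typically has 24 bits of padding
/// between its fields on common targets (an illustrative case; the actual
/// layout is determined by TargetData).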
static bool HasPadding(const Type *Ty, const TargetData &TD) {
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TD.getStructLayout(STy);
    unsigned PrevFieldBitOffset = 0;
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned FieldBitOffset = SL->getElementOffsetInBits(i);

      // Padding in sub-elements?
      if (HasPadding(STy->getElementType(i), TD))
        return true;

      // Check to see if there is any padding between this element and the
      // previous one.
      if (i) {
        unsigned PrevFieldEnd =
          PrevFieldBitOffset+TD.getTypeSizeInBits(STy->getElementType(i-1));
        if (PrevFieldEnd < FieldBitOffset)
          return true;
      }

      PrevFieldBitOffset = FieldBitOffset;
    }

    //  Check for tail padding.
    if (unsigned EltCount = STy->getNumElements()) {
      unsigned PrevFieldEnd = PrevFieldBitOffset +
                   TD.getTypeSizeInBits(STy->getElementType(EltCount-1));
      if (PrevFieldEnd < SL->getSizeInBits())
        return true;
    }

  } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    return HasPadding(ATy->getElementType(), TD);
  } else if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    return HasPadding(VTy->getElementType(), TD);
  }
  return TD.getTypeSizeInBits(Ty) != TD.getTypeAllocSizeInBits(Ty);
}

/// isSafeAllocaToScalarRepl - Check to see if the specified allocation of
/// an aggregate can be broken down into elements.  Return 0 if not, 3 if safe,
/// or 1 if safe after canonicalization has been performed.
///
int SROA::isSafeAllocaToScalarRepl(AllocationInst *AI) {
  // Loop over the use list of the alloca.  We can only transform it if all of
  // the users are safe to transform.
  AllocaInfo Info;

  for (Value::use_iterator I = AI->use_begin(), E = AI->use_end();
       I != E; ++I) {
    isSafeUseOfAllocation(cast<Instruction>(*I), AI, Info);
    if (Info.isUnsafe) {
      DEBUG(errs() << "Cannot transform: " << *AI << "  due to user: " << **I);
      return 0;
    }
  }

  // Okay, we know all the users are promotable.  If the aggregate is a memcpy
  // source and destination, we have to be careful.  In particular, the memcpy
  // could be moving around elements that live in structure padding of the LLVM
  // types, but may actually be used.  In these cases, we refuse to promote the
  // struct.
  if (Info.isMemCpySrc && Info.isMemCpyDst &&
      HasPadding(AI->getType()->getElementType(), *TD))
    return 0;

  // If we require cleanup, return 1, otherwise return 3.
  return Info.needsCleanup ? 1 : 3;
}

/// CleanupGEP - GEP is used by an Alloca, which can be promoted after the GEP
/// is canonicalized here.
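/// For instance (an illustrative sketch), a load through a GEP with a variable
/// index i into a two-element array becomes two loads through constant-index
/// GEPs plus a select on (i != 0).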
void SROA::CleanupGEP(GetElementPtrInst *GEPI) {
  gep_type_iterator I = gep_type_begin(GEPI);
  ++I;

  const ArrayType *AT = dyn_cast<ArrayType>(*I);
  if (!AT)
    return;

  uint64_t NumElements = AT->getNumElements();

  if (isa<ConstantInt>(I.getOperand()))
    return;

  if (NumElements == 1) {
    GEPI->setOperand(2,
                  Constant::getNullValue(Type::getInt32Ty(GEPI->getContext())));
    return;
  }

  assert(NumElements == 2 && "Unhandled case!");
  // All users of the GEP must be loads.  At each use of the GEP, insert
  // two loads of the appropriate indexed GEP and select between them.
  Value *IsOne = new ICmpInst(GEPI, ICmpInst::ICMP_NE, I.getOperand(),
                              Constant::getNullValue(I.getOperand()->getType()),
                              "isone");
  // Insert the new GEP instructions, which are properly indexed.
  SmallVector<Value*, 8> Indices(GEPI->op_begin()+1, GEPI->op_end());
  Indices[1] = Constant::getNullValue(Type::getInt32Ty(GEPI->getContext()));
  Value *ZeroIdx = GetElementPtrInst::Create(GEPI->getOperand(0),
                                             Indices.begin(),
                                             Indices.end(),
                                             GEPI->getName()+".0", GEPI);
  Indices[1] = ConstantInt::get(Type::getInt32Ty(GEPI->getContext()), 1);
  Value *OneIdx = GetElementPtrInst::Create(GEPI->getOperand(0),
                                            Indices.begin(),
                                            Indices.end(),
                                            GEPI->getName()+".1", GEPI);
  // Replace all loads of the variable index GEP with loads from both
  // indexes and a select.
  while (!GEPI->use_empty()) {
    LoadInst *LI = cast<LoadInst>(GEPI->use_back());
    Value *Zero = new LoadInst(ZeroIdx, LI->getName()+".0", LI);
    Value *One  = new LoadInst(OneIdx , LI->getName()+".1", LI);
    Value *R = SelectInst::Create(IsOne, One, Zero, LI->getName(), LI);
    LI->replaceAllUsesWith(R);
    LI->eraseFromParent();
  }
  GEPI->eraseFromParent();
}


/// CleanupAllocaUsers - If SROA reported that it can promote the specified
/// allocation, but only if cleaned up, perform the cleanups required.
void SROA::CleanupAllocaUsers(AllocationInst *AI) {
  // At this point, we know that the end result will be SROA'd and promoted, so
  // we can insert ugly code if required so long as sroa+mem2reg will clean it
  // up.
  for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
       UI != E; ) {
    User *U = *UI++;
    if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U))
      CleanupGEP(GEPI);
    else {
      Instruction *I = cast<Instruction>(U);
      SmallVector<DbgInfoIntrinsic *, 2> DbgInUses;
      if (!isa<StoreInst>(I) && OnlyUsedByDbgInfoIntrinsics(I, &DbgInUses)) {
        // Safe to remove debug info uses.
        while (!DbgInUses.empty()) {
          DbgInfoIntrinsic *DI = DbgInUses.back(); DbgInUses.pop_back();
          DI->eraseFromParent();
        }
        I->eraseFromParent();
      }
    }
  }
}

/// MergeInType - Add the 'In' type to the accumulated vector type (VecTy) so
/// far at the offset specified by Offset (which is specified in bytes).
///
/// There are two cases we handle here:
///   1) A union of vector types of the same size and potentially its elements.
///      Here we turn element accesses into insert/extract element operations.
///      This promotes a <4 x float> with a store of float to the third element
///      into a <4 x float> that uses insert element.
///   2) A fully general blob of memory, which we turn into some (potentially
///      large) integer type with extract and insert operations where the loads
///      and stores would mutate the memory.
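/// For instance (an illustrative sketch): float accesses at offsets 0..12 of a
/// 16-byte alloca merge into <4 x float>, while an i16 access at offset 1
/// (incompatible with any element size) forces the general integer fallback.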
static void MergeInType(const Type *In, uint64_t Offset, const Type *&VecTy,
                        unsigned AllocaSize, const TargetData &TD,
                        LLVMContext &Context) {
  // If this could be contributing to a vector, analyze it.
  if (VecTy != Type::getVoidTy(Context)) { // either null or a vector type.

    // If the In type is a vector that is the same size as the alloca, see if it
    // matches the existing VecTy.
    if (const VectorType *VInTy = dyn_cast<VectorType>(In)) {
      if (VInTy->getBitWidth()/8 == AllocaSize && Offset == 0) {
        // If we're storing/loading a vector of the right size, allow it as a
        // vector.  If this is the first vector we see, remember the type so
        // that we know the element size.
        if (VecTy == 0)
          VecTy = VInTy;
        return;
      }
    } else if (In == Type::getFloatTy(Context) ||
               In == Type::getDoubleTy(Context) ||
               (isa<IntegerType>(In) && In->getPrimitiveSizeInBits() >= 8 &&
                isPowerOf2_32(In->getPrimitiveSizeInBits()))) {
      // If we're accessing something that could be an element of a vector, see
      // if the implied vector agrees with what we already have and if Offset is
      // compatible with it.
      unsigned EltSize = In->getPrimitiveSizeInBits()/8;
      if (Offset % EltSize == 0 &&
          AllocaSize % EltSize == 0 &&
          (VecTy == 0 ||
           cast<VectorType>(VecTy)->getElementType()
                 ->getPrimitiveSizeInBits()/8 == EltSize)) {
        if (VecTy == 0)
          VecTy = VectorType::get(In, AllocaSize/EltSize);
        return;
      }
    }
  }

  // Otherwise, we have a case that we can't handle with an optimized vector
  // form.  We can still turn this into a large integer.
  VecTy = Type::getVoidTy(Context);
}

/// CanConvertToScalar - V is a pointer.  If we can convert the pointee and all
/// of its accesses to use a single vector type, return true and set VecTy to
/// the new type.  If we could convert the alloca into a single promotable
/// integer, return true but set VecTy to VoidTy.  Further, if the use is not a
/// completely trivial use that mem2reg could promote, set IsNotTrivial.  Offset
/// is the current offset from the base of the alloca being analyzed.
///
/// If we see at least one access to the value as a vector type, set the
/// SawVec flag.
///
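/// For example (a minimal sketch), an 8-byte alloca accessed both as a whole
/// i64 and as an i16 at byte offset 2 has no consistent vector element size,
/// so this returns true with VecTy set to VoidTy and the alloca is treated as
/// a single promotable i64 blob.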
bool SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial, const Type *&VecTy,
                              bool &SawVec, uint64_t Offset,
                              unsigned AllocaSize) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // Don't break volatile loads.
      if (LI->isVolatile())
        return false;
      MergeInType(LI->getType(), Offset, VecTy,
                  AllocaSize, *TD, V->getContext());
      SawVec |= isa<VectorType>(LI->getType());
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // Storing the pointer itself, rather than into the pointee?  Volatile?
      if (SI->getOperand(0) == V || SI->isVolatile()) return false;
      MergeInType(SI->getOperand(0)->getType(), Offset,
                  VecTy, AllocaSize, *TD, V->getContext());
      SawVec |= isa<VectorType>(SI->getOperand(0)->getType());
      continue;
    }

    if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
      if (!CanConvertToScalar(BCI, IsNotTrivial, VecTy, SawVec, Offset,
                              AllocaSize))
        return false;
      IsNotTrivial = true;
      continue;
    }

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // If this is a GEP with variable indices, we can't handle it.
      if (!GEP->hasAllConstantIndices())
        return false;

      // Compute the offset that this GEP adds to the pointer.
      SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
      uint64_t GEPOffset = TD->getIndexedOffset(GEP->getOperand(0)->getType(),
                                                &Indices[0], Indices.size());
      // See if all uses can be converted.
      if (!CanConvertToScalar(GEP, IsNotTrivial, VecTy, SawVec,
                              Offset+GEPOffset, AllocaSize))
        return false;
      IsNotTrivial = true;
      continue;
    }

    // If this is a constant-sized memset of a constant value (e.g. 0), we can
    // handle it.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
      // Store of constant value and constant size.
      if (isa<ConstantInt>(MSI->getValue()) &&
          isa<ConstantInt>(MSI->getLength())) {
        IsNotTrivial = true;
        continue;
      }
    }

    // If this is a memcpy or memmove into or out of the whole allocation, we
    // can handle it like a load or store of the scalar type.
    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) {
      if (ConstantInt *Len = dyn_cast<ConstantInt>(MTI->getLength()))
        if (Len->getZExtValue() == AllocaSize && Offset == 0) {
          IsNotTrivial = true;
          continue;
        }
    }

    // Ignore debug info intrinsics.
    if (isa<DbgInfoIntrinsic>(User))
      continue;

    // Otherwise, we cannot handle this!
    return false;
  }

  return true;
}


/// ConvertUsesToScalar - Convert all of the users of Ptr to use the new alloca
/// directly.  This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is the bit offset from the start of the original alloca (i.e. the
/// amount the value must be shifted right).  By the end of this, there should
/// be no uses of Ptr.
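///
/// For example (a minimal sketch), with NewAI = "%A = alloca i64", a store of
/// an i32 %x through a bitcast of Ptr becomes a load of the whole i64 from
/// %A, an integer insertion of %x at bit offset 0 (done by
/// ConvertScalar_InsertValue), and a store of the merged i64 back to %A.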
void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset) {
  while (!Ptr->use_empty()) {
    Instruction *User = cast<Instruction>(Ptr->use_back());

    if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
      ConvertUsesToScalar(CI, NewAI, Offset);
      CI->eraseFromParent();
      continue;
    }

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // Compute the offset that this GEP adds to the pointer.
      SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
      uint64_t GEPOffset = TD->getIndexedOffset(GEP->getOperand(0)->getType(),
                                                &Indices[0], Indices.size());
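      // Note: getIndexedOffset returns a byte offset while Offset is tracked
      // in bits, hence the multiply by 8 below.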
      ConvertUsesToScalar(GEP, NewAI, Offset+GEPOffset*8);
      GEP->eraseFromParent();
      continue;
    }

    IRBuilder<> Builder(User->getParent(), User);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // The load is a bit extract from NewAI shifted right by Offset bits.
      Value *LoadedVal = Builder.CreateLoad(NewAI, "tmp");
      Value *NewLoadVal
        = ConvertScalar_ExtractValue(LoadedVal, LI->getType(), Offset, Builder);
      LI->replaceAllUsesWith(NewLoadVal);
      LI->eraseFromParent();
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      assert(SI->getOperand(0) != Ptr && "Consistency error!");
      // FIXME: Remove once builder has Twine API.
      Value *Old = Builder.CreateLoad(NewAI, (NewAI->getName()+".in").str().c_str());
      Value *New = ConvertScalar_InsertValue(SI->getOperand(0), Old, Offset,
                                             Builder);
      Builder.CreateStore(New, NewAI);
      SI->eraseFromParent();
      continue;
    }

    // If this is a constant-sized memset of a constant value (e.g. 0), we can
    // transform it into a store of the expanded constant value.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
      assert(MSI->getRawDest() == Ptr && "Consistency error!");
      unsigned NumBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
      if (NumBytes != 0) {
        unsigned Val = cast<ConstantInt>(MSI->getValue())->getZExtValue();

        // Compute the value replicated the right number of times.
        APInt APVal(NumBytes*8, Val);

        // Splat the value if non-zero.
        if (Val)
          for (unsigned i = 1; i != NumBytes; ++i)
            APVal |= APVal << 8;
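        // e.g. Val = 0xAB with NumBytes = 4 splats to APVal = 0xABABABAB.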

        // FIXME: Remove once builder has Twine API.
        Value *Old = Builder.CreateLoad(NewAI, (NewAI->getName()+".in").str().c_str());
        Value *New = ConvertScalar_InsertValue(
                                    ConstantInt::get(User->getContext(), APVal),
                                               Old, Offset, Builder);
        Builder.CreateStore(New, NewAI);
      }
      MSI->eraseFromParent();
      continue;
    }

    // If this is a memcpy or memmove into or out of the whole allocation, we
    // can handle it like a load or store of the scalar type.
    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) {
      assert(Offset == 0 && "must be store to start of alloca");

      // If the source and destination are both the same alloca, then this is
      // a noop copy-to-self, just delete it.  Otherwise, emit a load and store
      // as appropriate.
      AllocaInst *OrigAI = cast<AllocaInst>(Ptr->getUnderlyingObject());

      if (MTI->getSource()->getUnderlyingObject() != OrigAI) {
        // Dest must be OrigAI, change this to be a load from the original
        // pointer (bitcasted), then a store to our new alloca.
        assert(MTI->getRawDest() == Ptr && "Neither use is of pointer?");
        Value *SrcPtr = MTI->getSource();
        SrcPtr = Builder.CreateBitCast(SrcPtr, NewAI->getType());

        LoadInst *SrcVal = Builder.CreateLoad(SrcPtr, "srcval");
        SrcVal->setAlignment(MTI->getAlignment());
        Builder.CreateStore(SrcVal, NewAI);
      } else if (MTI->getDest()->getUnderlyingObject() != OrigAI) {
        // Src must be OrigAI, change this to be a load from NewAI then a store
        // through the original dest pointer (bitcasted).
        assert(MTI->getRawSource() == Ptr && "Neither use is of pointer?");
        LoadInst *SrcVal = Builder.CreateLoad(NewAI, "srcval");

        Value *DstPtr = Builder.CreateBitCast(MTI->getDest(), NewAI->getType());
        StoreInst *NewStore = Builder.CreateStore(SrcVal, DstPtr);
        NewStore->setAlignment(MTI->getAlignment());
      } else {
        // Noop transfer: Src == Dst.
      }

      MTI->eraseFromParent();
      continue;
    }

    // If the user is a dbg info intrinsic, it is safe to remove it.
    if (isa<DbgInfoIntrinsic>(User)) {
      User->eraseFromParent();
      continue;
    }

    llvm_unreachable("Unsupported operation!");
  }
}

/// ConvertScalar_ExtractValue - Extract a value of type ToType from an integer
/// or vector value FromVal, extracting the bits from the offset specified by
/// Offset.  This returns the value, which is of type ToType.
///
/// This happens when we are converting an "integer union" to a single
/// integer scalar, or when we are converting a "vector union" to a vector with
/// insert/extractelement instructions.
///
/// Offset is the bit offset from the start of the original alloca (i.e. the
/// amount the value must be shifted right).
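///
/// For example (a minimal sketch), extracting an i16 at bit offset 16 from an
/// i64 value on a little-endian target yields:
///
///   %s = lshr i64 %FromVal, 16
///   %t = trunc i64 %s to i16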
Value *SROA::ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
                                        uint64_t Offset, IRBuilder<> &Builder) {
  // If the load is of the whole new alloca, no conversion is needed.
  if (FromVal->getType() == ToType && Offset == 0)
    return FromVal;

  // If the result alloca is a vector type, this is either an element
  // access or a bitcast to another vector type of the same size.
  if (const VectorType *VTy = dyn_cast<VectorType>(FromVal->getType())) {
    if (isa<VectorType>(ToType))
      return Builder.CreateBitCast(FromVal, ToType, "tmp");

    // Otherwise it must be an element access.
    unsigned Elt = 0;
    if (Offset) {
      unsigned EltSize = TD->getTypeAllocSizeInBits(VTy->getElementType());
      Elt = Offset/EltSize;
      assert(EltSize*Elt == Offset && "Invalid modulus in validity checking");
    }
    // Return the element extracted out of it.
    Value *V = Builder.CreateExtractElement(FromVal, ConstantInt::get(
                    Type::getInt32Ty(FromVal->getContext()), Elt), "tmp");
    if (V->getType() != ToType)
      V = Builder.CreateBitCast(V, ToType, "tmp");
    return V;
  }

  // If ToType is a first-class aggregate, extract out each of the pieces and
  // use insertvalues to form the FCA.
  if (const StructType *ST = dyn_cast<StructType>(ToType)) {
    const StructLayout &Layout = *TD->getStructLayout(ST);
    Value *Res = UndefValue::get(ST);
    for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
      Value *Elt = ConvertScalar_ExtractValue(FromVal, ST->getElementType(i),
                                        Offset+Layout.getElementOffsetInBits(i),
                                              Builder);
      Res = Builder.CreateInsertValue(Res, Elt, i, "tmp");
    }
    return Res;
  }

  if (const ArrayType *AT = dyn_cast<ArrayType>(ToType)) {
    uint64_t EltSize = TD->getTypeAllocSizeInBits(AT->getElementType());
    Value *Res = UndefValue::get(AT);
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      Value *Elt = ConvertScalar_ExtractValue(FromVal, AT->getElementType(),
                                              Offset+i*EltSize, Builder);
      Res = Builder.CreateInsertValue(Res, Elt, i, "tmp");
    }
    return Res;
  }

  // Otherwise, this must be a union that was converted to an integer value.
  const IntegerType *NTy = cast<IntegerType>(FromVal->getType());

  // If this is a big-endian system and the load is narrower than the
  // full alloca type, we need to do a shift to get the right bits.
  int ShAmt = 0;
  if (TD->isBigEndian()) {
    // On big-endian machines, the lowest bit is stored at the bit offset
    // from the pointer given by getTypeStoreSizeInBits.  This matters for
    // integers with a bitwidth that is not a multiple of 8.
    ShAmt = TD->getTypeStoreSizeInBits(NTy) -
            TD->getTypeStoreSizeInBits(ToType) - Offset;
  } else {
    ShAmt = Offset;
  }
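  // e.g. extracting an i16 at bit offset 16 from an i64: on a big-endian
  // target ShAmt = 64 - 16 - 16 = 32; on a little-endian target ShAmt = 16.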

  // Note: we support negative shift amounts (handled with a shl), which are
  // not really defined.  We do this to support (f.e.) loads off the end of a
  // structure where only some bits are used.
  if (ShAmt > 0 && (unsigned)ShAmt < NTy->getBitWidth())
    FromVal = Builder.CreateLShr(FromVal,
                                 ConstantInt::get(FromVal->getType(),
                                                           ShAmt), "tmp");
  else if (ShAmt < 0 && (unsigned)-ShAmt < NTy->getBitWidth())
    FromVal = Builder.CreateShl(FromVal,
                                ConstantInt::get(FromVal->getType(),
                                                          -ShAmt), "tmp");

  // Finally, truncate or zero-extend the integer to the right width.
  unsigned LIBitWidth = TD->getTypeSizeInBits(ToType);
  if (LIBitWidth < NTy->getBitWidth())
    FromVal =
      Builder.CreateTrunc(FromVal, IntegerType::get(FromVal->getContext(),
                                                    LIBitWidth), "tmp");
  else if (LIBitWidth > NTy->getBitWidth())
    FromVal =
       Builder.CreateZExt(FromVal, IntegerType::get(FromVal->getContext(),
                                                    LIBitWidth), "tmp");

  // If the result is an integer, this is a trunc or bitcast.
  if (isa<IntegerType>(ToType)) {
    // Should be done.
  } else if (ToType->isFloatingPoint() || isa<VectorType>(ToType)) {
    // Just do a bitcast; we know the sizes match up.
    FromVal = Builder.CreateBitCast(FromVal, ToType, "tmp");
  } else {
    // Otherwise it must be a pointer.
    FromVal = Builder.CreateIntToPtr(FromVal, ToType, "tmp");
  }
  assert(FromVal->getType() == ToType && "Didn't convert right?");
  return FromVal;
}


/// ConvertScalar_InsertValue - Insert the value "SV" into the existing integer
/// or vector value "Old" at the offset specified by Offset.
///
/// This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is the bit offset from the start of the original alloca at which
/// SV is inserted.
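///
/// For example (a minimal sketch), inserting an i8 %v at bit offset 8 into an
/// i32 %Old on a little-endian target yields roughly:
///
///   %ext = zext i8 %v to i32
///   %shl = shl i32 %ext, 8
///   %mask = and i32 %Old, -65281    ; clear bits 8-15, i.e. ~0x0000FF00
///   %ins = or i32 %mask, %shl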
Value *SROA::ConvertScalar_InsertValue(Value *SV, Value *Old,
                                       uint64_t Offset, IRBuilder<> &Builder) {

  // Convert the stored type to the actual type, shift it left to insert, then
  // 'or' it into place.
  const Type *AllocaType = Old->getType();
  LLVMContext &Context = Old->getContext();

  if (const VectorType *VTy = dyn_cast<VectorType>(AllocaType)) {
    uint64_t VecSize = TD->getTypeAllocSizeInBits(VTy);
    uint64_t ValSize = TD->getTypeAllocSizeInBits(SV->getType());

    // Changing the whole vector with memset or with an access of a different
    // vector type?
    if (ValSize == VecSize)
      return Builder.CreateBitCast(SV, AllocaType, "tmp");

    uint64_t EltSize = TD->getTypeAllocSizeInBits(VTy->getElementType());

    // Must be an element insertion.
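    // e.g. a float store at bit offset 64 into a <4 x float> lands at Elt = 2.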
    unsigned Elt = Offset/EltSize;

    if (SV->getType() != VTy->getElementType())
      SV = Builder.CreateBitCast(SV, VTy->getElementType(), "tmp");

    SV = Builder.CreateInsertElement(Old, SV,
                     ConstantInt::get(Type::getInt32Ty(SV->getContext()), Elt),
                                     "tmp");
    return SV;
  }

  // If SV is a first-class aggregate value, insert each value recursively.
  if (const StructType *ST = dyn_cast<StructType>(SV->getType())) {
    const StructLayout &Layout = *TD->getStructLayout(ST);
    for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
      Value *Elt = Builder.CreateExtractValue(SV, i, "tmp");
      Old = ConvertScalar_InsertValue(Elt, Old,
                                      Offset+Layout.getElementOffsetInBits(i),
                                      Builder);
    }
    return Old;
  }

  if (const ArrayType *AT = dyn_cast<ArrayType>(SV->getType())) {
    uint64_t EltSize = TD->getTypeAllocSizeInBits(AT->getElementType());
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      Value *Elt = Builder.CreateExtractValue(SV, i, "tmp");
      Old = ConvertScalar_InsertValue(Elt, Old, Offset+i*EltSize, Builder);
    }
    return Old;
  }

  // If SV is a float, convert it to the appropriate integer type.
  // If it is a pointer, do the same.
  unsigned SrcWidth = TD->getTypeSizeInBits(SV->getType());
  unsigned DestWidth = TD->getTypeSizeInBits(AllocaType);
  unsigned SrcStoreWidth = TD->getTypeStoreSizeInBits(SV->getType());
  unsigned DestStoreWidth = TD->getTypeStoreSizeInBits(AllocaType);
  if (SV->getType()->isFloatingPoint() || isa<VectorType>(SV->getType()))
    SV = Builder.CreateBitCast(SV,
                            IntegerType::get(SV->getContext(), SrcWidth), "tmp");
  else if (isa<PointerType>(SV->getType()))
    SV = Builder.CreatePtrToInt(SV, TD->getIntPtrType(SV->getContext()), "tmp");

  // Zero extend or truncate the value if needed.
  if (SV->getType() != AllocaType) {
    if (SV->getType()->getPrimitiveSizeInBits() <
             AllocaType->getPrimitiveSizeInBits())
      SV = Builder.CreateZExt(SV, AllocaType, "tmp");
    else {
      // Truncation may be needed if storing more than the alloca can hold
      // (undefined behavior).
      SV = Builder.CreateTrunc(SV, AllocaType, "tmp");
      SrcWidth = DestWidth;
      SrcStoreWidth = DestStoreWidth;
    }
  }

  // If this is a big-endian system and the store is narrower than the
  // full alloca type, we need to do a shift to get the right bits.
  int ShAmt = 0;
  if (TD->isBigEndian()) {
    // On big-endian machines, the lowest bit is stored at the bit offset
    // from the pointer given by getTypeStoreSizeInBits.  This matters for
    // integers with a bitwidth that is not a multiple of 8.
    ShAmt = DestStoreWidth - SrcStoreWidth - Offset;
  } else {
    ShAmt = Offset;
  }

  // Note: we support negative shift amounts (handled with a lshr), which are
  // not really defined.  We do this to support (f.e.) stores off the end of a
  // structure where only some bits in the structure are set.
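  // e.g. with DestWidth = 32, SrcWidth = 8 and ShAmt = 16, Mask starts as
  // 0xFF and becomes 0x00FF0000 after the shift below.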
  APInt Mask(APInt::getLowBitsSet(DestWidth, SrcWidth));
  if (ShAmt > 0 && (unsigned)ShAmt < DestWidth) {
    SV = Builder.CreateShl(SV, ConstantInt::get(SV->getType(),
                           ShAmt), "tmp");
    Mask <<= ShAmt;
  } else if (ShAmt < 0 && (unsigned)-ShAmt < DestWidth) {
    SV = Builder.CreateLShr(SV, ConstantInt::get(SV->getType(),
                            -ShAmt), "tmp");
    Mask = Mask.lshr(-ShAmt);
  }

  // Mask out the bits we are about to insert from the old value, and or
  // in the new bits.
  if (SrcWidth != DestWidth) {
    assert(DestWidth > SrcWidth);
    Old = Builder.CreateAnd(Old, ConstantInt::get(Context, ~Mask), "mask");
    SV = Builder.CreateOr(Old, SV, "ins");
  }
  return SV;
}



/// PointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable.  This intentionally only accepts
/// constant expressions, because we can't rewrite arbitrary instructions.
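/// For example, a bitcast or getelementptr constant expression rooted at a
/// constant GlobalVariable qualifies; anything rooted elsewhere does not.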
static bool PointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return PointsToConstantGlobal(CE->getOperand(0));
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer; return false if we
/// see any stores or other unknown uses.  If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with isOffset), but otherwise
/// traverse the uses.  If we see a memcpy/memmove that targets an unoffset
/// pointer to the alloca, and if the source pointer points to a constant
/// global, we can optimize this.
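///
/// The pattern we look for is roughly (a sketch with illustrative names):
///
///   %A = alloca %T
///   %p = bitcast %T* %A to i8*
///   call void @llvm.memcpy...(i8* %p, i8* bitcast (%T* @G to i8*), ...)
///
/// where @G is a constant global, followed only by reads of %A.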
static bool isOnlyCopiedFromConstantGlobal(Value *V, Instruction *&TheCopy,
                                           bool isOffset) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    if (LoadInst *LI = dyn_cast<LoadInst>(*UI))
      // Ignore non-volatile loads; they are always ok.
      if (!LI->isVolatile())
        continue;

    if (BitCastInst *BCI = dyn_cast<BitCastInst>(*UI)) {
      // If uses of the bitcast are ok, we are ok.
      if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, isOffset))
        return false;
      continue;
    }
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(*UI)) {
      // If the GEP has all zero indices, it doesn't offset the pointer;
      // otherwise it does.
      if (!isOnlyCopiedFromConstantGlobal(GEP, TheCopy,
                                         isOffset || !GEP->hasAllZeroIndices()))
        return false;
      continue;
    }

    // If this isn't our memcpy/memmove, reject it as something we can't
    // handle.
    if (!isa<MemTransferInst>(*UI))
      return false;

    // If we have already seen a copy, reject the second one.
    if (TheCopy) return false;

    // If the pointer has been offset from the start of the alloca, we can't
    // safely handle this.
    if (isOffset) return false;

    // If the memintrinsic isn't using the alloca as the dest (the call's
    // operand 1), reject it.
    if (UI.getOperandNo() != 1) return false;

    MemIntrinsic *MI = cast<MemIntrinsic>(*UI);

    // If the source of the memcpy/move is not a constant global, reject it.
    if (!PointsToConstantGlobal(MI->getOperand(2)))
      return false;

    // Otherwise, the transform is safe.  Remember the copy instruction.
    TheCopy = MI;
  }
  return true;
}

/// isOnlyCopiedFromConstantGlobal - Return the copy instruction if the
/// specified alloca is only modified by a single copy from a constant global,
/// or null if it is not.  If we can prove this, we can replace any uses of
/// the alloca with uses of the global directly.
Instruction *SROA::isOnlyCopiedFromConstantGlobal(AllocationInst *AI) {
  Instruction *TheCopy = 0;
  if (::isOnlyCopiedFromConstantGlobal(AI, TheCopy, false))
    return TheCopy;
  return 0;
}
