ScalarReplAggregates.cpp revision 6860f6a01ccc71d7cad61de506e0cf8ecb8ca146
//===- ScalarReplAggregates.cpp - Scalar Replacement of Aggregates --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation implements the well-known scalar replacement of
// aggregates transformation.  This xform breaks up alloca instructions of
// aggregate type (structure or array) into individual alloca instructions for
// each member (if possible).  Then, if possible, it transforms the individual
// alloca instructions into nice clean scalar SSA form.
//
// This combines a simple SRoA algorithm with the Mem2Reg algorithm because the
// two often interact, especially for C++ programs.  As such, iterating between
// SRoA and Mem2Reg until we run out of things to promote works well.
//
//===----------------------------------------------------------------------===//
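//
// As a rough sketch of the overall effect (illustrative IR, not taken from a
// real test case):
//
//   %p = alloca { int, int }
//   %f = getelementptr { int, int }* %p, int 0, uint 1
//   store int 1, int* %f
//
// SROA splits %p into one alloca per member, rewrites the getelementptr users
// to address the member allocas directly, and Mem2Reg then promotes the
// resulting scalar allocas into SSA registers, removing the memory traffic.
//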

#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Pass.h"
#include "llvm/Instructions.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
using namespace llvm;

namespace {
  Statistic<> NumReplaced("scalarrepl", "Number of allocas broken up");
  Statistic<> NumPromoted("scalarrepl", "Number of allocas promoted");
  Statistic<> NumConverted("scalarrepl",
                           "Number of aggregates converted to scalar");

  struct SROA : public FunctionPass {
    bool runOnFunction(Function &F);

    bool performScalarRepl(Function &F);
    bool performPromotion(Function &F);

    // getAnalysisUsage - This pass requires dominance information and target
    // data in order to promote and split allocas, but we know it will not
    // alter the CFG, so say so.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      AU.addRequired<DominanceFrontier>();
      AU.addRequired<TargetData>();
      AU.setPreservesCFG();
    }

  private:
    int isSafeElementUse(Value *Ptr);
    int isSafeUseOfAllocation(Instruction *User);
    int isSafeAllocaToScalarRepl(AllocationInst *AI);
    void CanonicalizeAllocaUsers(AllocationInst *AI);
    AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocationInst *Base);

    const Type *CanConvertToScalar(Value *V, bool &IsNotTrivial);
    void ConvertToScalar(AllocationInst *AI, const Type *Ty);
    void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset);
  };

  RegisterOpt<SROA> X("scalarrepl", "Scalar Replacement of Aggregates");
}

// Public interface to the ScalarReplAggregates pass
FunctionPass *llvm::createScalarReplAggregatesPass() { return new SROA(); }


bool SROA::runOnFunction(Function &F) {
  bool Changed = performPromotion(F);
  while (1) {
    bool LocalChange = performScalarRepl(F);
    if (!LocalChange) break;   // No need to repromote if no scalarrepl
    Changed = true;
    LocalChange = performPromotion(F);
    if (!LocalChange) break;   // No need to re-scalarrepl if no promotion
  }

  return Changed;
}


bool SROA::performPromotion(Function &F) {
  std::vector<AllocaInst*> Allocas;
  const TargetData &TD = getAnalysis<TargetData>();
  DominatorTree     &DT = getAnalysis<DominatorTree>();
  DominanceFrontier &DF = getAnalysis<DominanceFrontier>();

  BasicBlock &BB = F.getEntryBlock();  // Get the entry node for the function

  bool Changed = false;

  while (1) {
    Allocas.clear();

    // Find allocas that are safe to promote, by looking at all instructions in
    // the entry node
    for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(I))       // Is it an alloca?
        if (isAllocaPromotable(AI, TD))
          Allocas.push_back(AI);

    if (Allocas.empty()) break;

    PromoteMemToReg(Allocas, DT, DF, TD);
    NumPromoted += Allocas.size();
    Changed = true;
  }

  return Changed;
}

// performScalarRepl - This algorithm is a simple worklist-driven algorithm,
// which runs on all of the malloc/alloca instructions in the entry block of
// the function, removing them if they are only used by getelementptr
// instructions.
//
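// For instance (illustrative IR, not from a test case), given
//   %A = alloca { int, int }
// whose only users are getelementptrs of the form
//   %A.f = getelementptr { int, int }* %A, int 0, uint <cst>
// we create one scalar alloca per member ("%A.0", "%A.1") and rewrite each
// getelementptr's users to address the matching element alloca directly.
//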
bool SROA::performScalarRepl(Function &F) {
  std::vector<AllocationInst*> WorkList;

  // Scan the entry basic block, adding any allocas and mallocs to the worklist
  BasicBlock &BB = F.getEntryBlock();
  for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
    if (AllocationInst *A = dyn_cast<AllocationInst>(I))
      WorkList.push_back(A);

  // Process the worklist
  bool Changed = false;
  while (!WorkList.empty()) {
    AllocationInst *AI = WorkList.back();
    WorkList.pop_back();

    // Check to see if we can turn this aggregate value (potentially with
    // casts) into a simple scalar value that can be mem2reg'd into a register.
    // If so, convert it.
    bool IsNotTrivial = false;
    if (const Type *ActualType = CanConvertToScalar(AI, IsNotTrivial))
      if (IsNotTrivial) {
        ConvertToScalar(AI, ActualType);
        Changed = true;
        continue;
      }

    // We cannot transform the allocation instruction if it is an array
    // allocation (allocations OF arrays are ok though), and an allocation of a
    // scalar value cannot be decomposed at all.
    //
    if (AI->isArrayAllocation() ||
        (!isa<StructType>(AI->getAllocatedType()) &&
         !isa<ArrayType>(AI->getAllocatedType()))) continue;

    // Check that all of the users of the allocation are capable of being
    // transformed.
    switch (isSafeAllocaToScalarRepl(AI)) {
    default: assert(0 && "Unexpected value!");
    case 0:  // Not safe to scalar replace.
      continue;
    case 1:  // Safe, but requires cleanup/canonicalizations first
      CanonicalizeAllocaUsers(AI);
      // FALL THROUGH to the safe case once the users are canonicalized.
    case 3:  // Safe to scalar replace.
      break;
    }

    DEBUG(std::cerr << "Found inst to xform: " << *AI);
    Changed = true;

    std::vector<AllocaInst*> ElementAllocas;
    if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
      ElementAllocas.reserve(ST->getNumContainedTypes());
      for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {
        AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0,
                                        AI->getAlignment(),
                                        AI->getName() + "." + utostr(i), AI);
        ElementAllocas.push_back(NA);
        WorkList.push_back(NA);  // Add to worklist for recursive processing
      }
    } else {
      const ArrayType *AT = cast<ArrayType>(AI->getAllocatedType());
      ElementAllocas.reserve(AT->getNumElements());
      const Type *ElTy = AT->getElementType();
      for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
        AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(),
                                        AI->getName() + "." + utostr(i), AI);
        ElementAllocas.push_back(NA);
        WorkList.push_back(NA);  // Add to worklist for recursive processing
      }
    }

    // Now that we have created the alloca instructions that we want to use,
    // expand the getelementptr instructions to use them.
    //
    while (!AI->use_empty()) {
      Instruction *User = cast<Instruction>(AI->use_back());
      GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User);
      // We now know that the GEP is of the form: GEP <ptr>, 0, <cst>
      unsigned Idx =
         (unsigned)cast<ConstantInt>(GEPI->getOperand(2))->getRawValue();

      assert(Idx < ElementAllocas.size() && "Index out of range?");
      AllocaInst *AllocaToUse = ElementAllocas[Idx];

      Value *RepValue;
      if (GEPI->getNumOperands() == 3) {
        // Do not insert a new getelementptr instruction with zero indices, only
        // to have it optimized out later.
        RepValue = AllocaToUse;
      } else {
        // We are indexing deeply into the structure, so we still need a
        // getelementptr instruction to finish the indexing.  This may itself
        // be expanded once the worklist is rerun.
        //
        std::string OldName = GEPI->getName();  // Steal the old name.
        std::vector<Value*> NewArgs;
        NewArgs.push_back(Constant::getNullValue(Type::IntTy));
        NewArgs.insert(NewArgs.end(), GEPI->op_begin()+3, GEPI->op_end());
        GEPI->setName("");
        RepValue = new GetElementPtrInst(AllocaToUse, NewArgs, OldName, GEPI);
      }

      // Move all of the users over to the new GEP.
      GEPI->replaceAllUsesWith(RepValue);
      // Delete the old GEP
      GEPI->eraseFromParent();
    }

    // Finally, delete the Alloca instruction
    AI->getParent()->getInstList().erase(AI);
    NumReplaced++;
  }

  return Changed;
}


/// isSafeElementUse - Check to see if this use is an allowed use for a
/// getelementptr instruction of an array aggregate allocation.
///
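/// As a sketch (hypothetical IR): loads from the element pointer and stores
/// INTO it are fine, but
///   store int* %elt, int** %escape
/// (storing the element pointer itself) or a getelementptr with a non-zero
/// first index (pointer arithmetic off the element) makes the alloca unsafe
/// to split.
///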
int SROA::isSafeElementUse(Value *Ptr) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I) {
    Instruction *User = cast<Instruction>(*I);
    switch (User->getOpcode()) {
    case Instruction::Load:  break;
    case Instruction::Store:
      // Store is ok if storing INTO the pointer, not storing the pointer
      if (User->getOperand(0) == Ptr) return 0;
      break;
    case Instruction::GetElementPtr: {
      GetElementPtrInst *GEP = cast<GetElementPtrInst>(User);
      if (GEP->getNumOperands() > 1) {
        if (!isa<Constant>(GEP->getOperand(1)) ||
            !cast<Constant>(GEP->getOperand(1))->isNullValue())
          return 0;  // Using pointer arithmetic to navigate the array...
      }
      if (!isSafeElementUse(GEP)) return 0;
      break;
    }
    default:
      DEBUG(std::cerr << "  Transformation preventing inst: " << *User);
      return 0;
    }
  }
  return 3;  // All users look ok :)
}

/// AllUsersAreLoads - Return true if all users of this value are loads.
static bool AllUsersAreLoads(Value *Ptr) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I)
    if (cast<Instruction>(*I)->getOpcode() != Instruction::Load)
      return false;
  return true;
}

/// isSafeUseOfAllocation - Check to see if this user is an allowed use for an
/// aggregate allocation.
///
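/// Roughly speaking (illustrative IR), only users of the form
///   getelementptr { int, int }* %AI, int 0, uint 1
/// are allowed: a leading zero index followed by a constant member index,
/// with the small variable-index exception handled below.
///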
int SROA::isSafeUseOfAllocation(Instruction *User) {
  if (!isa<GetElementPtrInst>(User)) return 0;

  GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User);
  gep_type_iterator I = gep_type_begin(GEPI), E = gep_type_end(GEPI);

  // The GEP is safe to transform if it is of the form GEP <ptr>, 0, <cst>
  if (I == E ||
      I.getOperand() != Constant::getNullValue(I.getOperand()->getType()))
    return 0;

  ++I;
  if (I == E) return 0;  // ran out of GEP indices??

  // If this is a use of an array allocation, do a bit more checking for sanity.
  if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
    uint64_t NumElements = AT->getNumElements();

    if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand())) {
      // Check to make sure that the index falls within the array.  If not,
      // something funny is going on, so we won't do the optimization.
      //
      if (CI->getRawValue() >= NumElements)
        return 0;

    } else {
      // If this is an array index and the index is not constant, we cannot
      // promote... that is unless the array has exactly one or two elements in
      // it, in which case we CAN promote it, but we have to canonicalize this
      // out if this is the only problem.
      if (NumElements == 1 || NumElements == 2)
        return AllUsersAreLoads(GEPI) ? 1 : 0;  // Canonicalization required!
      return 0;
    }
  }

  // If there are any non-simple uses of this getelementptr, make sure to reject
  // them.
  return isSafeElementUse(GEPI);
}

/// isSafeAllocaToScalarRepl - Check to see if the specified allocation of an
/// aggregate can be broken down into elements.  Return 0 if not, 3 if safe,
/// or 1 if safe after canonicalization has been performed.
///
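/// The 0/1/3 encoding lets the per-use results be combined with a bitwise AND
/// below: 3 (binary 11) AND 3 stays 3, 3 AND 1 (binary 01) drops to 1, and
/// anything AND 0 is 0.  One unsafe use therefore poisons the whole alloca,
/// while one cleanup-required use downgrades "safe" to "safe after
/// canonicalization".
///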
int SROA::isSafeAllocaToScalarRepl(AllocationInst *AI) {
  // Loop over the use list of the alloca.  We can only transform it if all of
  // the users are safe to transform.
  //
  int isSafe = 3;
  for (Value::use_iterator I = AI->use_begin(), E = AI->use_end();
       I != E; ++I) {
    isSafe &= isSafeUseOfAllocation(cast<Instruction>(*I));
    if (isSafe == 0) {
      DEBUG(std::cerr << "Cannot transform: " << *AI << "  due to user: "
            << **I);
      return 0;
    }
  }
  // If we require cleanup, isSafe is now 1, otherwise it is 3.
  return isSafe;
}

/// CanonicalizeAllocaUsers - If SROA reported that it can promote the specified
/// allocation, but only if cleaned up, perform the cleanups required.
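/// For a two-element array this rewrites (a sketch; the IR is illustrative)
///   %P = getelementptr [2 x int]* %A, int 0, int %i
///   %V = load int* %P
/// into a "setne" test of %i against zero, loads of both elements through
/// constant-index GEPs, and a select between the two loaded values.  Every
/// GEP index is then constant, so scalar replacement can proceed.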
void SROA::CanonicalizeAllocaUsers(AllocationInst *AI) {
  // At this point, we know that the end result will be SROA'd and promoted, so
  // we can insert ugly code if required so long as sroa+mem2reg will clean it
  // up.
  for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
       UI != E; ) {
    GetElementPtrInst *GEPI = cast<GetElementPtrInst>(*UI++);
    gep_type_iterator I = gep_type_begin(GEPI);
    ++I;

    if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
      uint64_t NumElements = AT->getNumElements();

      if (!isa<ConstantInt>(I.getOperand())) {
        if (NumElements == 1) {
          GEPI->setOperand(2, Constant::getNullValue(Type::IntTy));
        } else {
          assert(NumElements == 2 && "Unhandled case!");
          // All users of the GEP must be loads.  At each use of the GEP, insert
          // two loads of the appropriate indexed GEP and select between them.
          Value *IsOne = BinaryOperator::createSetNE(I.getOperand(),
                              Constant::getNullValue(I.getOperand()->getType()),
                                                     "isone", GEPI);
          // Insert the new GEP instructions, which are properly indexed.
          std::vector<Value*> Indices(GEPI->op_begin()+1, GEPI->op_end());
          Indices[1] = Constant::getNullValue(Type::IntTy);
          Value *ZeroIdx = new GetElementPtrInst(GEPI->getOperand(0), Indices,
                                                 GEPI->getName()+".0", GEPI);
          Indices[1] = ConstantInt::get(Type::IntTy, 1);
          Value *OneIdx = new GetElementPtrInst(GEPI->getOperand(0), Indices,
                                                GEPI->getName()+".1", GEPI);
          // Replace all loads of the variable index GEP with loads from both
          // indexes and a select.
          while (!GEPI->use_empty()) {
            LoadInst *LI = cast<LoadInst>(GEPI->use_back());
            Value *Zero = new LoadInst(ZeroIdx, LI->getName()+".0", LI);
            Value *One  = new LoadInst(OneIdx , LI->getName()+".1", LI);
            Value *R = new SelectInst(IsOne, One, Zero, LI->getName(), LI);
            LI->replaceAllUsesWith(R);
            LI->eraseFromParent();
          }
          GEPI->eraseFromParent();
        }
      }
    }
  }
}

/// MergeInType - Add the 'In' type to the accumulated type so far.  If the
/// types are incompatible, return true, otherwise update Accum and return
/// false.
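/// For example, merging a ubyte access into an accumulated uint leaves the
/// accumulator as uint (the larger integral type wins), while merging in a
/// pointer or floating-point type reports the types as incompatible.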
static bool MergeInType(const Type *In, const Type *&Accum) {
  if (!In->isIntegral()) return true;

  // If this is our first type, just use it.
  if (Accum == Type::VoidTy) {
    Accum = In;
  } else {
    // Otherwise pick whichever type is larger.
    if (In->getTypeID() > Accum->getTypeID())
      Accum = In;
  }
  return false;
}

/// getUIntAtLeastAsBitAs - Return an unsigned integer type that is at least
/// as big as the specified number of bits.  If there is no suitable type, this
/// returns null.
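/// For instance, getUIntAtLeastAsBitAs(20) returns Type::UIntTy, the smallest
/// unsigned type with at least 20 bits, and any request over 64 bits returns
/// null because ulong is the widest integer type available.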
const Type *getUIntAtLeastAsBitAs(unsigned NumBits) {
  if (NumBits > 64) return 0;
  if (NumBits > 32) return Type::ULongTy;
  if (NumBits > 16) return Type::UIntTy;
  if (NumBits > 8) return Type::UShortTy;
  return Type::UByteTy;
}

/// CanConvertToScalar - V is a pointer.  If we can convert the pointee to a
/// single scalar integer type, return that type.  Further, if the use is not
/// a completely trivial use that mem2reg could promote, set IsNotTrivial.  If
/// there are no uses of this pointer, return Type::VoidTy to differentiate from
/// failure.
///
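/// A sketch of the kind of code this accepts (illustrative IR, not from a
/// test case):
///   %U = alloca ushort
///   %P = cast ushort* %U to sbyte*
///   store sbyte 0, sbyte* %P
///   %V = load ushort* %U
/// The union-like alloca is only accessed as integers of 16 bits or fewer, so
/// it can be represented as a single ushort; the cast makes it non-trivial.
///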
const Type *SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial) {
  const Type *UsedType = Type::VoidTy; // No uses, no forced type.
  const TargetData &TD = getAnalysis<TargetData>();
  const PointerType *PTy = cast<PointerType>(V->getType());

  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      if (MergeInType(LI->getType(), UsedType))
        return 0;

    } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // Storing the pointer itself, not storing into the pointee?
      if (SI->getOperand(0) == V) return 0;

      // NOTE: We could handle storing of FP imms here!

      if (MergeInType(SI->getOperand(0)->getType(), UsedType))
        return 0;
    } else if (CastInst *CI = dyn_cast<CastInst>(User)) {
      if (!isa<PointerType>(CI->getType())) return 0;
      IsNotTrivial = true;
      const Type *SubTy = CanConvertToScalar(CI, IsNotTrivial);
      if (!SubTy || MergeInType(SubTy, UsedType)) return 0;
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // Check to see if this is stepping over an element: GEP Ptr, int C
      if (GEP->getNumOperands() == 2 && isa<ConstantInt>(GEP->getOperand(1))) {
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getRawValue();
        unsigned ElSize = TD.getTypeSize(PTy->getElementType());
        unsigned BitOffset = Idx*ElSize*8;
        if (BitOffset > 64 || !isPowerOf2_32(ElSize)) return 0;

        IsNotTrivial = true;
        const Type *SubElt = CanConvertToScalar(GEP, IsNotTrivial);
        if (SubElt == 0) return 0;
        if (SubElt != Type::VoidTy) {
          const Type *NewTy =
            getUIntAtLeastAsBitAs(SubElt->getPrimitiveSizeInBits()+BitOffset);
          if (NewTy == 0 || MergeInType(NewTy, UsedType)) return 0;
          continue;
        }
      } else if (GEP->getNumOperands() == 3 &&
                 isa<ConstantInt>(GEP->getOperand(1)) &&
                 isa<ConstantInt>(GEP->getOperand(2)) &&
                 cast<Constant>(GEP->getOperand(1))->isNullValue()) {
        // We are stepping into an element, e.g. a structure or an array:
        // GEP Ptr, int 0, uint C
        const Type *AggTy = PTy->getElementType();
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(2))->getRawValue();

        if (const ArrayType *ATy = dyn_cast<ArrayType>(AggTy)) {
          if (Idx >= ATy->getNumElements()) return 0;  // Out of range.
        } else if (const PackedType *PackedTy = dyn_cast<PackedType>(AggTy)) {
          if (Idx >= PackedTy->getNumElements()) return 0;  // Out of range.
        } else if (isa<StructType>(AggTy)) {
          // Structs are always ok.
        } else {
          return 0;
        }
        const Type *NTy = getUIntAtLeastAsBitAs(TD.getTypeSize(AggTy)*8);
        if (NTy == 0 || MergeInType(NTy, UsedType)) return 0;
        const Type *SubTy = CanConvertToScalar(GEP, IsNotTrivial);
        if (SubTy == 0) return 0;
        if (SubTy != Type::VoidTy && MergeInType(SubTy, UsedType))
          return 0;
        continue;    // Everything looks ok
      }
      return 0;
    } else {
      // Cannot handle this!
      return 0;
    }
  }

  return UsedType;
}

/// ConvertToScalar - The specified alloca passes the CanConvertToScalar
/// predicate and is non-trivial.  Convert it to something that can be trivially
/// promoted into a register by mem2reg.
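/// Continuing the union sketch from CanConvertToScalar above, the new alloca
/// would be "alloca ushort" (the unsigned version of the merged type), and
/// ConvertUsesToScalar then rewrites every load, store, cast, and GEP user in
/// terms of that single scalar.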
void SROA::ConvertToScalar(AllocationInst *AI, const Type *ActualTy) {
  DEBUG(std::cerr << "CONVERT TO SCALAR: " << *AI << "  TYPE = "
                  << *ActualTy << "\n");
  ++NumConverted;

  BasicBlock *EntryBlock = AI->getParent();
  assert(EntryBlock == &EntryBlock->getParent()->front() &&
         "Not in the entry block!");
  EntryBlock->getInstList().remove(AI);  // Take the alloca out of the program.

  // Create and insert the alloca.
  AllocaInst *NewAI = new AllocaInst(ActualTy->getUnsignedVersion(), 0,
                                     AI->getName(), EntryBlock->begin());
  ConvertUsesToScalar(AI, NewAI, 0);
  delete AI;
}


/// ConvertUsesToScalar - Convert all of the users of Ptr to use the new alloca
/// directly.  Offset is the bit offset from the start of the original alloca;
/// loaded values must be shifted right by Offset bits.  By the end of this,
/// there should be no uses of Ptr.
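/// For example (a sketch with made-up names), a ushort load of the high half
/// of a uint alloca at Offset 16 becomes
///   %tmp = load uint* %NewAI
///   %shr = shr uint %tmp, ubyte 16
///   %val = cast uint %shr to ushort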
void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset) {
  while (!Ptr->use_empty()) {
    Instruction *User = cast<Instruction>(Ptr->use_back());

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // The load is a bit extract from NewAI shifted right by Offset bits.
      Value *NV = new LoadInst(NewAI, LI->getName(), LI);
      if (Offset && Offset < NV->getType()->getPrimitiveSizeInBits())
        NV = new ShiftInst(Instruction::Shr, NV,
                           ConstantUInt::get(Type::UByteTy, Offset),
                           LI->getName(), LI);
      if (NV->getType() != LI->getType())
        NV = new CastInst(NV, LI->getType(), LI->getName(), LI);
      LI->replaceAllUsesWith(NV);
      LI->eraseFromParent();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      assert(SI->getOperand(0) != Ptr && "Consistency error!");

      // Convert the stored value to the actual alloca type, shift it left into
      // position, then 'or' it into place.
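      // A sketch of the sequence built below for a ushort value %v stored into
      // a uint alloca at Offset 16 (illustrative names):
      //   %old      = load uint* %NewAI
      //   %v.cast   = cast ushort %v to uint
      //   %v.adj    = shl uint %v.cast, ubyte 16
      //   %old.mask = and uint %old, 65535    ; clear the bits being inserted
      //   %v.ins    = or uint %old.mask, %v.adj
      //   store uint %v.ins, uint* %NewAI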
      Value *SV = SI->getOperand(0);
      if (SV->getType() == NewAI->getType()->getElementType()) {
        assert(Offset == 0 && "Store out of bounds!");
      } else {
        Value *Old = new LoadInst(NewAI, NewAI->getName()+".in", SI);
        // If SV is signed, convert it to unsigned, so that the next cast zero
        // extends the value.
        if (SV->getType()->isSigned())
          SV = new CastInst(SV, SV->getType()->getUnsignedVersion(),
                            SV->getName(), SI);
        SV = new CastInst(SV, Old->getType(), SV->getName(), SI);
        if (Offset && Offset < SV->getType()->getPrimitiveSizeInBits())
          SV = new ShiftInst(Instruction::Shl, SV,
                             ConstantUInt::get(Type::UByteTy, Offset),
                             SV->getName()+".adj", SI);
        // Mask out the bits we are about to insert from the old value.
        unsigned TotalBits = SV->getType()->getPrimitiveSizeInBits();
        unsigned InsertBits =
          SI->getOperand(0)->getType()->getPrimitiveSizeInBits();
        if (TotalBits != InsertBits) {
          assert(TotalBits > InsertBits);
          uint64_t Mask = ~(((1ULL << InsertBits)-1) << Offset);
          if (TotalBits != 64)
            Mask = Mask & ((1ULL << TotalBits)-1);
          Old = BinaryOperator::createAnd(Old,
                                        ConstantUInt::get(Old->getType(), Mask),
                                          Old->getName()+".mask", SI);
          SV = BinaryOperator::createOr(Old, SV, SV->getName()+".ins", SI);
        }
      }
      new StoreInst(SV, NewAI, SI);
      SI->eraseFromParent();

    } else if (CastInst *CI = dyn_cast<CastInst>(User)) {
      unsigned NewOff = Offset;
      const TargetData &TD = getAnalysis<TargetData>();
      if (TD.isBigEndian()) {
        // Adjust the pointer.  For example, storing 16 bits into a 32-bit
        // alloca with just a cast makes it modify the top 16 bits.
        const Type *SrcTy = cast<PointerType>(Ptr->getType())->getElementType();
        const Type *DstTy = cast<PointerType>(CI->getType())->getElementType();
        int PtrDiffBits = TD.getTypeSize(SrcTy)*8-TD.getTypeSize(DstTy)*8;
        NewOff += PtrDiffBits;
      }
      ConvertUsesToScalar(CI, NewAI, NewOff);
      CI->eraseFromParent();
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      const PointerType *AggPtrTy =
        cast<PointerType>(GEP->getOperand(0)->getType());
      const TargetData &TD = getAnalysis<TargetData>();
      unsigned AggSizeInBits = TD.getTypeSize(AggPtrTy->getElementType())*8;

      // Check to see if this is stepping over an element: GEP Ptr, int C
      unsigned NewOffset = Offset;
      if (GEP->getNumOperands() == 2) {
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getRawValue();
        unsigned BitOffset = Idx*AggSizeInBits;

        if (TD.isLittleEndian())
          NewOffset += BitOffset;
        else
          NewOffset -= BitOffset;

      } else if (GEP->getNumOperands() == 3) {
        // We know that the first index (operand #1) is zero; operand #2 is the
        // element index.
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(2))->getRawValue();
        const Type *AggTy = AggPtrTy->getElementType();
        if (const SequentialType *SeqTy = dyn_cast<SequentialType>(AggTy)) {
          unsigned ElSizeBits = TD.getTypeSize(SeqTy->getElementType())*8;

          if (TD.isLittleEndian())
            NewOffset += ElSizeBits*Idx;
          else
            NewOffset += AggSizeInBits-ElSizeBits*(Idx+1);
        } else if (const StructType *STy = dyn_cast<StructType>(AggTy)) {
          unsigned EltBitOffset = TD.getStructLayout(STy)->MemberOffsets[Idx]*8;

          if (TD.isLittleEndian())
            NewOffset += EltBitOffset;
          else {
            const PointerType *ElPtrTy = cast<PointerType>(GEP->getType());
            unsigned ElSizeBits = TD.getTypeSize(ElPtrTy->getElementType())*8;
            NewOffset += AggSizeInBits-(EltBitOffset+ElSizeBits);
          }

        } else {
          assert(0 && "Unsupported operation!");
          abort();
        }
      } else {
        assert(0 && "Unsupported operation!");
        abort();
      }
      ConvertUsesToScalar(GEP, NewAI, NewOffset);
      GEP->eraseFromParent();
    } else {
      assert(0 && "Unsupported operation!");
      abort();
    }
  }
}