InstCombineLoadStoreAlloca.cpp revision 209178dacacb5c254926a9d8c72933f23feced9f
//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

STATISTIC(NumDeadStore,    "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");

/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable.  This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
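/// For example (illustrative, typed-pointer IR of this LLVM version), given
///   \@G = constant [2 x i32] [i32 1, i32 2]
/// both of these constant expressions point into the constant global:
///   bitcast ([2 x i32]* \@G to i8*)
///   getelementptr ([2 x i32]* \@G, i32 0, i32 1)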
static bool pointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return pointsToConstantGlobal(CE->getOperand(0));
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer, return false if we
/// see any stores or other unknown uses.  If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset) but otherwise traverse
/// the uses.  If we see a memcpy/memmove that targets an unoffseted pointer to
/// the alloca, and if the source pointer is a pointer to a constant global, we
/// can optimize this.
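///
/// A sketch of the pattern this recognizes (illustrative IR; assumes the
/// i32-length memcpy variant of this LLVM version):
///   %A = alloca [16 x i8]
///   %p = getelementptr inbounds [16 x i8]* %A, i32 0, i32 0
///   call void \@llvm.memcpy.p0i8.p0i8.i32(i8* %p,
///       i8* getelementptr inbounds ([16 x i8]* \@G, i32 0, i32 0),
///       i32 16, i32 1, i1 false)
///   ; ...followed only by loads of %A...
/// Here TheCopy is set to the memcpy, and the caller may replace %A with \@G.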
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete,
                               bool IsOffset = false) {
  // We track lifetime intrinsics as we encounter them.  If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    User *U = cast<Instruction>(*UI);

    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
      // Ignore simple (non-volatile, non-atomic) loads; they are always ok.
      if (!LI->isSimple()) return false;
      continue;
    }

    if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
      // If uses of the bitcast are ok, we are ok.
      if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, ToDelete, IsOffset))
        return false;
      continue;
    }
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
      // If the GEP has all zero indices, it doesn't offset the pointer.  If it
      // doesn't, it does.
      if (!isOnlyCopiedFromConstantGlobal(
              GEP, TheCopy, ToDelete, IsOffset || !GEP->hasAllZeroIndices()))
        return false;
      continue;
    }

    if (CallSite CS = U) {
      // If this is the function being called then we treat it like a load and
      // ignore it.
      if (CS.isCallee(UI))
        continue;

      // If this is a readonly/readnone call site, then we know it is just a
      // load (but one that potentially returns the value itself), so we can
      // ignore it if we know that the value isn't captured.
      unsigned ArgNo = CS.getArgumentNo(UI);
      if (CS.onlyReadsMemory() &&
          (CS.getInstruction()->use_empty() || CS.doesNotCapture(ArgNo)))
        continue;

      // If this is being passed as a byval argument, the caller is making a
      // copy, so it is only a read of the alloca.
      if (CS.isByValArgument(ArgNo))
        continue;
    }

    // Lifetime intrinsics can be handled by the caller.
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
      if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
          II->getIntrinsicID() == Intrinsic::lifetime_end) {
        assert(II->use_empty() && "Lifetime markers have no result to use!");
        ToDelete.push_back(II);
        continue;
      }
    }

    // If this isn't our memcpy/memmove, reject it as something we can't
    // handle.
    MemTransferInst *MI = dyn_cast<MemTransferInst>(U);
    if (MI == 0)
      return false;

    // If the transfer is using the alloca as a source of the transfer, then
    // ignore it since it is a load (unless the transfer is volatile).
    if (UI.getOperandNo() == 1) {
      if (MI->isVolatile()) return false;
      continue;
    }

    // If we already have seen a copy, reject the second one.
    if (TheCopy) return false;

    // If the pointer has been offset from the start of the alloca, we can't
    // safely handle this.
    if (IsOffset) return false;

    // If the memintrinsic isn't using the alloca as the dest, reject it.
    if (UI.getOperandNo() != 0) return false;

    // If the source of the memcpy/move is not a constant global, reject it.
    if (!pointsToConstantGlobal(MI->getSource()))
      return false;

    // Otherwise, the transform is safe.  Remember the copy instruction.
    TheCopy = MI;
  }
  return true;
}

/// isOnlyCopiedFromConstantGlobal - If the specified alloca is only modified
/// by a copy from a constant global, return that copy; otherwise return null.
/// If we can prove this, we can replace any uses of the alloca with uses of
/// the global directly.
static MemTransferInst *
isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  MemTransferInst *TheCopy = 0;
  if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
    return TheCopy;
  return 0;
}

Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  if (TD) {
    Type *IntPtrTy = TD->getIntPtrType(AI.getType());
    if (AI.getArraySize()->getType() != IntPtrTy) {
      Value *V = Builder->CreateIntCast(AI.getArraySize(),
                                        IntPtrTy, false);
      AI.setOperand(0, V);
      return &AI;
    }
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
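  // For example (illustrative IR):
  //   %V = alloca i32, i32 4
  // -->
  //   %V1 = alloca [4 x i32]
  //   %V1.sub = getelementptr inbounds [4 x i32]* %V1, i64 0, i64 0
  // with every use of %V rewritten to use %V1.sub.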
  if (AI.isArrayAllocation()) {  // Check C != 1
    if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
      Type *NewTy =
        ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
      AllocaInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName());
      New->setAlignment(AI.getAlignment());

      // Scan to the end of the allocation instructions, to skip over a block of
      // allocas if possible...also skip interleaved debug info
      //
      BasicBlock::iterator It = New;
      while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;

      // Now that It is pointing to the first non-allocation-inst in the block,
      // insert our getelementptr instruction...
      //
      Type *IdxTy = TD
                  ? TD->getIntPtrType(AI.getType())
                  : Type::getInt64Ty(AI.getContext());
      Value *NullIdx = Constant::getNullValue(IdxTy);
      Value *Idx[2] = { NullIdx, NullIdx };
      Instruction *GEP =
        GetElementPtrInst::CreateInBounds(New, Idx, New->getName() + ".sub");
      InsertNewInstBefore(GEP, *It);

      // Now make everything use the getelementptr instead of the original
      // allocation.
      return ReplaceInstUsesWith(AI, GEP);
    } else if (isa<UndefValue>(AI.getArraySize())) {
      return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
    }
  }

  if (TD && AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(TD->getPrefTypeAlignment(AI.getAllocatedType()));

    // Move all allocas of zero-byte objects to the entry block and merge them
    // together.  Note that we only do this for allocas, because malloc should
    // allocate and return a unique pointer, even for a zero-byte allocation.
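    // For example (illustrative IR), two zero-size allocas anywhere in the
    // function:
    //   %a = alloca {}
    //   %b = alloca [0 x i32]
    // end up funneled into a single entry-block alloca, with a bitcast added
    // when the pointer types differ.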
    if (TD->getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero sized alloca there is no point in doing an array allocation.
      // This is helpful if the array size is a complicated expression not used
      // elsewhere.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block.  There is no problem with
        // dominance as the array size was forced to a constant earlier already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            TD->getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // If the alignment of the entry block alloca is 0 (unspecified),
        // assign it the preferred alignment.
        if (EntryAI->getAlignment() == 0)
          EntryAI->setAlignment(
            TD->getPrefTypeAlignment(EntryAI->getAllocatedType()));
        // Replace this zero-sized alloca with the one at the start of the entry
        // block after ensuring that the address will be aligned enough for both
        // types.
        unsigned MaxAlign = std::max(EntryAI->getAlignment(),
                                     AI.getAlignment());
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return ReplaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  if (AI.getAlignment()) {
    // Check to see if this allocation is only modified by a memcpy/memmove from
    // a constant global whose alignment is equal to or exceeds that of the
    // allocation.  If this is the case, we can change all users to use
    // the constant global instead.  This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
    // is only subsequently read.
    SmallVector<Instruction *, 4> ToDelete;
    if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
      unsigned SourceAlign = getOrEnforceKnownAlignment(Copy->getSource(),
                                                        AI.getAlignment(), TD);
      if (AI.getAlignment() <= SourceAlign) {
        DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
        DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
        for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
          EraseInstFromFunction(*ToDelete[i]);
        Constant *TheSrc = cast<Constant>(Copy->getSource());
        Constant *Cast
          = ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, AI.getType());
        Instruction *NewI = ReplaceInstUsesWith(AI, Cast);
        EraseInstFromFunction(*Copy);
        ++NumGlobalCopies;
        return NewI;
      }
    }
  }

  // Finally, use the generic allocation site handler to aggressively remove
  // unused allocas.
  return visitAllocSite(AI);
}


/// InstCombineLoadCast - Fold 'load (cast P)' -> 'cast (load P)' when possible.
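/// For example (illustrative IR, assuming the DataLayout gives both types
/// the same size in bits):
///   %c = bitcast <2 x i32>* %P to i64*
///   %v = load i64* %c
/// -->
///   %v.cast = load <2 x i32>* %P
///   %v = bitcast <2 x i32> %v.cast to i64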
static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
                                        const DataLayout *TD) {
  User *CI = cast<User>(LI.getOperand(0));
  Value *CastOp = CI->getOperand(0);

  PointerType *DestTy = cast<PointerType>(CI->getType());
  Type *DestPTy = DestTy->getElementType();
  if (PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType())) {

    // If the address spaces don't match, don't eliminate the cast.
    if (DestTy->getAddressSpace() != SrcTy->getAddressSpace())
      return 0;

    Type *SrcPTy = SrcTy->getElementType();

    if (DestPTy->isIntegerTy() || DestPTy->isPointerTy() ||
         DestPTy->isVectorTy()) {
      // If the source is an array, the code below will not succeed.  Check to
      // see if a trivial 'gep P, 0, 0' will help matters.  Only do this for
      // constants.
      if (ArrayType *ASrcTy = dyn_cast<ArrayType>(SrcPTy))
        if (Constant *CSrc = dyn_cast<Constant>(CastOp))
          if (ASrcTy->getNumElements() != 0) {
            Type *IdxTy = TD
                        ? TD->getIntPtrType(SrcTy)
                        : Type::getInt64Ty(SrcTy->getContext());
            Value *Idx = Constant::getNullValue(IdxTy);
            Value *Idxs[2] = { Idx, Idx };
            CastOp = ConstantExpr::getGetElementPtr(CSrc, Idxs);
            SrcTy = cast<PointerType>(CastOp->getType());
            SrcPTy = SrcTy->getElementType();
          }

      if (IC.getDataLayout() &&
          (SrcPTy->isIntegerTy() || SrcPTy->isPointerTy() ||
            SrcPTy->isVectorTy()) &&
          // Do not allow turning this into a load of an integer, which is then
          // cast to a pointer; this pessimizes pointer analysis a lot.
          (SrcPTy->isPtrOrPtrVectorTy() ==
           LI.getType()->isPtrOrPtrVectorTy()) &&
          IC.getDataLayout()->getTypeSizeInBits(SrcPTy) ==
               IC.getDataLayout()->getTypeSizeInBits(DestPTy)) {

        // Okay, we are casting from one integer or pointer type to another of
        // the same size.  Instead of casting the pointer before the load, cast
        // the result of the loaded value.
        LoadInst *NewLoad =
          IC.Builder->CreateLoad(CastOp, LI.isVolatile(), CI->getName());
        NewLoad->setAlignment(LI.getAlignment());
        NewLoad->setAtomic(LI.getOrdering(), LI.getSynchScope());
        // Now cast the result of the load.
        return new BitCastInst(NewLoad, LI.getType());
      }
    }
  }
  return 0;
}

Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Attempt to improve the alignment.
  if (TD) {
    unsigned KnownAlign =
      getOrEnforceKnownAlignment(Op, TD->getPrefTypeAlignment(LI.getType()),TD);
    unsigned LoadAlign = LI.getAlignment();
    unsigned EffectiveLoadAlign = LoadAlign != 0 ? LoadAlign :
      TD->getABITypeAlignment(LI.getType());

    if (KnownAlign > EffectiveLoadAlign)
      LI.setAlignment(KnownAlign);
    else if (LoadAlign == 0)
      LI.setAlignment(EffectiveLoadAlign);
  }

  // load (cast X) --> cast (load X) iff safe.
  if (isa<CastInst>(Op))
    if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
      return Res;

  // None of the following transforms are legal for volatile/atomic loads.
  // FIXME: Some of it is okay for atomic loads; needs refactoring.
  if (!LI.isSimple()) return 0;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
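  // For example (illustrative IR), within the small scan window used below
  // the load is forwarded from the earlier store:
  //   store i32 %x, i32* %P
  //   %a = add i32 %y, 1
  //   %v = load i32* %P        ; replaced by %x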
  BasicBlock::iterator BBI = &LI;
  if (Value *AvailableVal = FindAvailableLoadedValue(Op, LI.getParent(), BBI,6))
    return ReplaceInstUsesWith(LI, AvailableVal);

  // load(gep null, ...) -> unreachable
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    // TODO: Consider a target hook for valid address spaces for this xform.
    if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0){
      // Insert a new store to null instruction before the load to indicate
      // that this code is not reachable.  We do this instead of inserting
      // an unreachable instruction directly because we cannot modify the
      // CFG.
      new StoreInst(UndefValue::get(LI.getType()),
                    Constant::getNullValue(Op->getType()), &LI);
      return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    }
  }

  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for this xform.
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) {
    // Insert a new store to null instruction before the load to indicate that
    // this code is not reachable.  We do this instead of inserting an
    // unreachable instruction directly because we cannot modify the CFG.
    new StoreInst(UndefValue::get(LI.getType()),
                  Constant::getNullValue(Op->getType()), &LI);
    return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }

  // Instcombine load (constantexpr_cast global) -> cast (load global)
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op))
    if (CE->isCast())
      if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
        return Res;

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap!  Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
      unsigned Align = LI.getAlignment();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align, TD) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align, TD)) {
        LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
                                           SI->getOperand(1)->getName()+".val");
        LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
                                           SI->getOperand(2)->getName()+".val");
        V1->setAlignment(Align);
        V2->setAlignment(Align);
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (Constant *C = dyn_cast<Constant>(SI->getOperand(1)))
        if (C->isNullValue()) {
          LI.setOperand(0, SI->getOperand(2));
          return &LI;
        }

      // load (select (cond, P, null)) -> load P
      if (Constant *C = dyn_cast<Constant>(SI->getOperand(2)))
        if (C->isNullValue()) {
          LI.setOperand(0, SI->getOperand(1));
          return &LI;
        }
    }
  }
  return 0;
}

/// InstCombineStoreToCast - Fold store V, (cast P) -> store (cast V), P
/// when possible.  This makes it generally easy to do alias analysis and/or
/// SROA/mem2reg of the memory object.
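/// For example (illustrative IR, assuming a target with 32-bit pointers):
///   %c = bitcast i8** %P to i32*
///   store i32 %x, i32* %c
/// -->
///   %x.c = inttoptr i32 %x to i8*
///   store i8* %x.c, i8** %P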
static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
  User *CI = cast<User>(SI.getOperand(1));
  Value *CastOp = CI->getOperand(0);

  Type *DestPTy = cast<PointerType>(CI->getType())->getElementType();
  PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType());
  if (SrcTy == 0) return 0;

  Type *SrcPTy = SrcTy->getElementType();

  if (!DestPTy->isIntegerTy() && !DestPTy->isPointerTy())
    return 0;

  /// NewGEPIndices - If SrcPTy is an aggregate type, we can emit a "noop gep"
  /// to its first element.  This allows us to handle things like:
  ///   store i32 xxx, (bitcast {foo*, float}* %P to i32*)
  /// on 32-bit hosts.
  SmallVector<Value*, 4> NewGEPIndices;

  // If the source is an array or struct, the code below will not succeed.
  // Check to see if a trivial 'gep P, 0, 0' will help matters.  Only do this
  // for constants.
  if (SrcPTy->isArrayTy() || SrcPTy->isStructTy()) {
    // Index through pointer.
    Constant *Zero = Constant::getNullValue(Type::getInt32Ty(SI.getContext()));
    NewGEPIndices.push_back(Zero);

    while (1) {
      if (StructType *STy = dyn_cast<StructType>(SrcPTy)) {
        if (!STy->getNumElements()) /* Struct can be empty {} */
          break;
        NewGEPIndices.push_back(Zero);
        SrcPTy = STy->getElementType(0);
      } else if (ArrayType *ATy = dyn_cast<ArrayType>(SrcPTy)) {
        NewGEPIndices.push_back(Zero);
        SrcPTy = ATy->getElementType();
      } else {
        break;
      }
    }

    SrcTy = PointerType::get(SrcPTy, SrcTy->getAddressSpace());
  }

  if (!SrcPTy->isIntegerTy() && !SrcPTy->isPointerTy())
    return 0;

  // If the pointers point into different address spaces or if they point to
  // values with different sizes, we can't do the transformation.
  if (!IC.getDataLayout() ||
      SrcTy->getAddressSpace() !=
        cast<PointerType>(CI->getType())->getAddressSpace() ||
      IC.getDataLayout()->getTypeSizeInBits(SrcPTy) !=
      IC.getDataLayout()->getTypeSizeInBits(DestPTy))
    return 0;

  // Okay, we are casting from one integer or pointer type to another of
  // the same size.  Instead of casting the pointer before
  // the store, cast the value to be stored.
  Value *NewCast;
  Value *SIOp0 = SI.getOperand(0);
  Instruction::CastOps opcode = Instruction::BitCast;
  Type* CastSrcTy = SIOp0->getType();
  Type* CastDstTy = SrcPTy;
  if (CastDstTy->isPointerTy()) {
    if (CastSrcTy->isIntegerTy())
      opcode = Instruction::IntToPtr;
  } else if (CastDstTy->isIntegerTy()) {
    if (SIOp0->getType()->isPointerTy())
      opcode = Instruction::PtrToInt;
  }

  // If the original pointer (CastOp) points to an aggregate, this is a store
  // to its first scalar field; emit a GEP to index down to that field.
  if (!NewGEPIndices.empty())
    CastOp = IC.Builder->CreateInBoundsGEP(CastOp, NewGEPIndices);

  NewCast = IC.Builder->CreateCast(opcode, SIOp0, CastDstTy,
                                   SIOp0->getName()+".c");
  SI.setOperand(0, NewCast);
  SI.setOperand(1, CastOp);
  return &SI;
}

/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Attempt to improve the alignment.
  if (TD) {
    unsigned KnownAlign =
      getOrEnforceKnownAlignment(Ptr, TD->getPrefTypeAlignment(Val->getType()),
                                 TD);
    unsigned StoreAlign = SI.getAlignment();
    unsigned EffectiveStoreAlign = StoreAlign != 0 ? StoreAlign :
      TD->getABITypeAlignment(Val->getType());

    if (KnownAlign > EffectiveStoreAlign)
      SI.setAlignment(KnownAlign);
    else if (StoreAlign == 0)
      SI.setAlignment(EffectiveStoreAlign);
  }

  // Don't hack volatile/atomic stores.
  // FIXME: Some bits are legal for atomic stores; needs refactoring.
  if (!SI.isSimple()) return 0;

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return EraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return EraseInstFromFunction(SI);
      }
    }
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations. This
  // situation often occurs with bitfield accesses.
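  // For example (illustrative IR), the first store below is dead and is
  // erased when the second one is visited:
  //   store i32 %a, i32* %P
  //   %b = or i32 %a, 1
  //   store i32 %b, i32* %P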
  BasicBlock::iterator BBI = &SI;
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen, and skip
    // pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (PrevSI->isSimple() && equivalentAddressValues(PrevSI->getOperand(1),
                                                        SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        EraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop.  However, if the load is of the
    // pointer we're storing to and produces the value we're storing, then
    // *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
          LI->isSimple())
        return EraseInstFromFunction(SI);

      // Otherwise, this is a load from some other location.  Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
      break;
  }

  // store X, null    -> turns into 'unreachable' in SimplifyCFG
  if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
    }
    return 0;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return EraseInstFromFunction(SI);

  // If the pointer destination is a cast, see if we can fold the cast into the
  // source instead.
  if (isa<CastInst>(Ptr))
    if (Instruction *Res = InstCombineStoreToCast(*this, SI))
      return Res;
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
    if (CE->isCast())
      if (Instruction *Res = InstCombineStoreToCast(*this, SI))
        return Res;


  // If this store is the last instruction in the basic block (possibly
  // excepting debug info instructions), and if the block ends with an
  // unconditional branch, try to move it to the successor block.
  BBI = &SI;
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      if (SimplifyStoreAtEndOfBlock(SI))
        return 0;  // xform done!

  return 0;
}

/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
///
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  BasicBlock *StoreBB = SI.getParent();

  // Check to see if the successor block has exactly two incoming edges.  If
  // so, see if the other predecessor contains a store to the same location.
  // If so, insert a PHI node (if needed) and move the stores down.
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);

  // Determine whether Dest has exactly two predecessors and, if so, compute
  // the other predecessor.
  pred_iterator PI = pred_begin(DestBB);
  BasicBlock *P = *PI;
  BasicBlock *OtherBB = 0;

  if (P != StoreBB)
    OtherBB = P;

  if (++PI == pred_end(DestBB))
    return false;

  P = *PI;
  if (P != StoreBB) {
    if (OtherBB)
      return false;
    OtherBB = P;
  }
  if (++PI != pred_end(DestBB))
    return false;

  // Bail out if all the relevant blocks aren't distinct (this can happen,
  // for example, if SI is in an infinite loop).
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI = OtherBB->getTerminator();
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case: there is an instruction before the branch.
  StoreInst *OtherStore = 0;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI==OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle.  See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
      if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
          BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBr, we have to
    // make sure nothing reads or overwrites the stored value in
    // StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }

  // Advance to a place where it is safe to insert the new store and
  // insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
                                   SI.isVolatile(),
                                   SI.getAlignment(),
                                   SI.getOrdering(),
                                   SI.getSynchScope());
  InsertNewInstBefore(NewSI, *BBI);
  NewSI->setDebugLoc(OtherStore->getDebugLoc());

  // If both stores carry TBAA tags, merge them into the most generic tag
  // and preserve it on the new store.
  if (MDNode *TBAATag = SI.getMetadata(LLVMContext::MD_tbaa))
    if ((TBAATag = MDNode::getMostGenericTBAA(TBAATag,
                               OtherStore->getMetadata(LLVMContext::MD_tbaa))))
      NewSI->setMetadata(LLVMContext::MD_tbaa, TBAATag);


  // Nuke the old stores.
  EraseInstFromFunction(SI);
  EraseInstFromFunction(*OtherStore);
  return true;
}
