//===-- Local.cpp - Functions to perform local transformations ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This family of functions performs various local transformations to the
// program.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "local"

STATISTIC(NumRemoved, "Number of unreachable basic blocks removed");

//===----------------------------------------------------------------------===//
//  Local constant propagation.
//

/// ConstantFoldTerminator - If a terminator instruction is predicated on a
/// constant value, convert it into an unconditional branch to the constant
/// destination.  This is a nontrivial operation because the successors of this
/// basic block must have their PHI nodes updated.
/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
/// conditions and indirectbr addresses this might make dead if
/// DeleteDeadConditions is true.
bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
                                  const TargetLibraryInfo *TLI) {
  TerminatorInst *T = BB->getTerminator();
  IRBuilder<> Builder(T);

  // Branch - See if we are conditionally jumping on a constant.
  if (BranchInst *BI = dyn_cast<BranchInst>(T)) {
    if (BI->isUnconditional()) return false;  // Can't optimize uncond branch
    BasicBlock *Dest1 = BI->getSuccessor(0);
    BasicBlock *Dest2 = BI->getSuccessor(1);

    if (ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
      // Are we branching on constant?
      // YES.  Change to unconditional branch...
      BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
      BasicBlock *OldDest     = Cond->getZExtValue() ? Dest2 : Dest1;
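
      // For example (illustrative IR), given a constant-true condition:
      //   br i1 true, label %live, label %dead
      // this folds to:
      //   br label %live
      // once %dead's PHI nodes drop their entries for this block.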

      //cerr << "Function: " << T->getParent()->getParent()
      //     << "\nRemoving branch from " << T->getParent()
      //     << "\n\nTo: " << OldDest << endl;

      // Let the basic block know that we are letting go of it.  Based on this,
      // it will adjust its PHI nodes.
      OldDest->removePredecessor(BB);

      // Replace the conditional branch with an unconditional one.
      Builder.CreateBr(Destination);
      BI->eraseFromParent();
      return true;
    }

    if (Dest2 == Dest1) {       // Conditional branch to same location?
      // This branch matches something like this:
      //     br bool %cond, label %Dest, label %Dest
      // and changes it into:  br label %Dest

      // Let the basic block know that we are letting go of one copy of it.
      assert(BI->getParent() && "Terminator not inserted in block!");
      Dest1->removePredecessor(BI->getParent());

      // Replace the conditional branch with an unconditional one.
      Builder.CreateBr(Dest1);
      Value *Cond = BI->getCondition();
      BI->eraseFromParent();
      if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
      return true;
    }
    return false;
  }

  if (SwitchInst *SI = dyn_cast<SwitchInst>(T)) {
    // If we are switching on a constant, we can convert the switch to an
    // unconditional branch.
    ConstantInt *CI = dyn_cast<ConstantInt>(SI->getCondition());
    BasicBlock *DefaultDest = SI->getDefaultDest();
    BasicBlock *TheOnlyDest = DefaultDest;

    // If the default is unreachable, ignore it when searching for TheOnlyDest.
    if (isa<UnreachableInst>(DefaultDest->getFirstNonPHIOrDbg()) &&
        SI->getNumCases() > 0) {
      TheOnlyDest = SI->case_begin().getCaseSuccessor();
    }

    // Figure out which case it goes to.
    for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
         i != e; ++i) {
      // Found case matching a constant operand?
      if (i.getCaseValue() == CI) {
        TheOnlyDest = i.getCaseSuccessor();
        break;
      }

      // Check to see if this branch is going to the same place as the default
      // dest.  If so, eliminate it as an explicit compare.
      if (i.getCaseSuccessor() == DefaultDest) {
        MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
        unsigned NCases = SI->getNumCases();
        // Fold the case metadata into the default if there will be any branches
        // left, unless the metadata doesn't match the switch.
        if (NCases > 1 && MD && MD->getNumOperands() == 2 + NCases) {
          // Collect branch weights into a vector.
          SmallVector<uint32_t, 8> Weights;
          for (unsigned MD_i = 1, MD_e = MD->getNumOperands(); MD_i < MD_e;
               ++MD_i) {
            auto *CI = mdconst::extract<ConstantInt>(MD->getOperand(MD_i));
            Weights.push_back(CI->getValue().getZExtValue());
          }
          // Merge weight of this case to the default weight.
          unsigned idx = i.getCaseIndex();
          Weights[0] += Weights[idx+1];
          // Remove weight for this case.
          std::swap(Weights[idx+1], Weights.back());
          Weights.pop_back();
          SI->setMetadata(LLVMContext::MD_prof,
                          MDBuilder(BB->getContext()).
                          createBranchWeights(Weights));
        }
        // Remove this entry.
        DefaultDest->removePredecessor(SI->getParent());
        SI->removeCase(i);
        --i; --e;
        continue;
      }

      // Otherwise, check to see if the switch only branches to one destination.
      // We do this by resetting "TheOnlyDest" to null when we find two non-equal
      // destinations.
      if (i.getCaseSuccessor() != TheOnlyDest) TheOnlyDest = nullptr;
    }

    if (CI && !TheOnlyDest) {
      // We are branching on a constant, but it matches none of the cases;
      // go to the default successor.
      TheOnlyDest = SI->getDefaultDest();
    }

    // If we found a single destination that we can fold the switch into, do so
    // now.
    if (TheOnlyDest) {
      // Insert the new branch.
      Builder.CreateBr(TheOnlyDest);
      BasicBlock *BB = SI->getParent();

      // Remove entries from PHI nodes which we no longer branch to...
      for (BasicBlock *Succ : SI->successors()) {
        // Found case matching a constant operand?
        if (Succ == TheOnlyDest)
          TheOnlyDest = nullptr; // Don't modify the first branch to TheOnlyDest
        else
          Succ->removePredecessor(BB);
      }

      // Delete the old switch.
      Value *Cond = SI->getCondition();
      SI->eraseFromParent();
      if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
      return true;
    }

    if (SI->getNumCases() == 1) {
      // Otherwise, we can fold this switch into a conditional branch
      // instruction if it has only one non-default destination.
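      // Illustrative IR for this rewrite:
      //   switch i32 %x, label %default [ i32 7, label %case ]
      // becomes:
      //   %cond = icmp eq i32 %x, 7
      //   br i1 %cond, label %case, label %default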
      SwitchInst::CaseIt FirstCase = SI->case_begin();
      Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
          FirstCase.getCaseValue(), "cond");

      // Insert the new branch.
      BranchInst *NewBr = Builder.CreateCondBr(Cond,
                                               FirstCase.getCaseSuccessor(),
                                               SI->getDefaultDest());
      MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
      if (MD && MD->getNumOperands() == 3) {
        ConstantInt *SICase =
            mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
        ConstantInt *SIDef =
            mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
        assert(SICase && SIDef);
        // The TrueWeight should be the weight for the single case of SI.
        NewBr->setMetadata(LLVMContext::MD_prof,
                        MDBuilder(BB->getContext()).
                        createBranchWeights(SICase->getValue().getZExtValue(),
                                            SIDef->getValue().getZExtValue()));
      }

      // Update make.implicit metadata to the newly-created conditional branch.
      MDNode *MakeImplicitMD = SI->getMetadata(LLVMContext::MD_make_implicit);
      if (MakeImplicitMD)
        NewBr->setMetadata(LLVMContext::MD_make_implicit, MakeImplicitMD);

      // Delete the old switch.
      SI->eraseFromParent();
      return true;
    }
    return false;
  }

  if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(T)) {
    // indirectbr blockaddress(@F, @BB) -> br label @BB
    if (BlockAddress *BA =
          dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
      BasicBlock *TheOnlyDest = BA->getBasicBlock();
      // Insert the new branch.
      Builder.CreateBr(TheOnlyDest);

      for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
        if (IBI->getDestination(i) == TheOnlyDest)
          TheOnlyDest = nullptr;
        else
          IBI->getDestination(i)->removePredecessor(IBI->getParent());
      }
      Value *Address = IBI->getAddress();
      IBI->eraseFromParent();
      if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(Address, TLI);

      // If we didn't find our destination in the IBI successor list, then we
      // have undefined behavior.  Replace the unconditional branch with an
      // 'unreachable' instruction.
      if (TheOnlyDest) {
        BB->getTerminator()->eraseFromParent();
        new UnreachableInst(BB->getContext(), BB);
      }

      return true;
    }
  }

  return false;
}


//===----------------------------------------------------------------------===//
//  Local dead code elimination.
//

/// isInstructionTriviallyDead - Return true if the result produced by the
/// instruction is not used, and the instruction has no side effects.
///
bool llvm::isInstructionTriviallyDead(Instruction *I,
                                      const TargetLibraryInfo *TLI) {
  if (!I->use_empty() || isa<TerminatorInst>(I)) return false;

  // We don't want the landingpad-like instructions removed by anything this
  // general.
  if (I->isEHPad())
    return false;

  // We don't want debug info removed by anything this general, unless
  // debug info is empty.
  if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(I)) {
    if (DDI->getAddress())
      return false;
    return true;
  }
  if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(I)) {
    if (DVI->getValue())
      return false;
    return true;
  }

  if (!I->mayHaveSideEffects()) return true;

  // Special case intrinsics that "may have side effects" but can be deleted
  // when dead.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    // Safe to delete llvm.stacksave if dead.
    if (II->getIntrinsicID() == Intrinsic::stacksave)
      return true;

    // Lifetime intrinsics are dead when their second argument (the pointer)
    // is undef.
    if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
        II->getIntrinsicID() == Intrinsic::lifetime_end)
      return isa<UndefValue>(II->getArgOperand(1));

    // Assumptions are dead if their condition is trivially true.  Guards on
    // true are operationally no-ops.  In the future we can consider more
    // sophisticated tradeoffs for guards considering potential for check
    // widening, but for now we keep things simple.
    if (II->getIntrinsicID() == Intrinsic::assume ||
        II->getIntrinsicID() == Intrinsic::experimental_guard) {
      if (ConstantInt *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0)))
        return !Cond->isZero();

      return false;
    }
  }

  if (isAllocLikeFn(I, TLI)) return true;

  if (CallInst *CI = isFreeCall(I, TLI))
    if (Constant *C = dyn_cast<Constant>(CI->getArgOperand(0)))
      return C->isNullValue() || isa<UndefValue>(C);

  return false;
}
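
// For example (illustrative), a caller can use this to drop an unused,
// side-effect-free instruction such as:
//   %sum = add i32 %a, %b    ; no uses
// via:
//   if (isInstructionTriviallyDead(Sum, TLI))
//     Sum->eraseFromParent();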

/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
/// trivially dead instruction, delete it.  If that makes any of its operands
/// trivially dead, delete them too, recursively.  Return true if any
/// instructions were deleted.
bool
llvm::RecursivelyDeleteTriviallyDeadInstructions(Value *V,
                                                 const TargetLibraryInfo *TLI) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || !I->use_empty() || !isInstructionTriviallyDead(I, TLI))
    return false;

  SmallVector<Instruction*, 16> DeadInsts;
  DeadInsts.push_back(I);

  do {
    I = DeadInsts.pop_back_val();

    // Null out all of the instruction's operands to see if any operand becomes
    // dead as we go.
    for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
      Value *OpV = I->getOperand(i);
      I->setOperand(i, nullptr);

      if (!OpV->use_empty()) continue;

      // If the operand is an instruction that became dead as we nulled out the
      // operand, and if it is 'trivially' dead, delete it in a future loop
      // iteration.
      if (Instruction *OpI = dyn_cast<Instruction>(OpV))
        if (isInstructionTriviallyDead(OpI, TLI))
          DeadInsts.push_back(OpI);
    }

    I->eraseFromParent();
  } while (!DeadInsts.empty());

  return true;
}
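
// Illustrative chain this removes: if the (non-volatile) load below is dead,
//   %p = getelementptr i32, i32* %base, i64 1
//   %v = load i32, i32* %p    ; no uses
// then deleting %v leaves %p unused, so %p is deleted on a later iteration.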

/// areAllUsesEqual - Check whether the uses of a value are all the same.
/// This is similar to Instruction::hasOneUse() except this will also return
/// true when there are no uses or multiple uses that all refer to the same
/// value.
static bool areAllUsesEqual(Instruction *I) {
  Value::user_iterator UI = I->user_begin();
  Value::user_iterator UE = I->user_end();
  if (UI == UE)
    return true;

  User *TheUse = *UI;
  for (++UI; UI != UE; ++UI) {
    if (*UI != TheUse)
      return false;
  }
  return true;
}

/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
/// dead PHI node, due to being a def-use chain of single-use nodes that
/// either forms a cycle or is terminated by a trivially dead instruction,
/// delete it.  If that makes any of its operands trivially dead, delete them
/// too, recursively.  Return true if a change was made.
bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
                                        const TargetLibraryInfo *TLI) {
  SmallPtrSet<Instruction*, 4> Visited;
  for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
       I = cast<Instruction>(*I->user_begin())) {
    if (I->use_empty())
      return RecursivelyDeleteTriviallyDeadInstructions(I, TLI);

    // If we find an instruction more than once, we're on a cycle that
    // won't prove fruitful.
    if (!Visited.insert(I).second) {
      // Break the cycle and delete the instruction and its operands.
      I->replaceAllUsesWith(UndefValue::get(I->getType()));
      (void)RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
      return true;
    }
  }
  return false;
}
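
// Illustrative cycle this handles: two phis that only feed each other,
//   %a = phi i32 [ 0, %entry ], [ %b, %latch ]
//   %b = phi i32 [ %a, %body ], [ 1, %other ]
// Neither value is used anywhere else, so both can be deleted.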

static bool
simplifyAndDCEInstruction(Instruction *I,
                          SmallSetVector<Instruction *, 16> &WorkList,
                          const DataLayout &DL,
                          const TargetLibraryInfo *TLI) {
  if (isInstructionTriviallyDead(I, TLI)) {
    // Null out all of the instruction's operands to see if any operand becomes
    // dead as we go.
    for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
      Value *OpV = I->getOperand(i);
      I->setOperand(i, nullptr);

      if (!OpV->use_empty() || I == OpV)
        continue;

      // If the operand is an instruction that became dead as we nulled out the
      // operand, and if it is 'trivially' dead, delete it in a future loop
      // iteration.
      if (Instruction *OpI = dyn_cast<Instruction>(OpV))
        if (isInstructionTriviallyDead(OpI, TLI))
          WorkList.insert(OpI);
    }

    I->eraseFromParent();

    return true;
  }

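  // Otherwise, try to fold the instruction into a simpler existing value,
  // e.g. (illustrative) "%a = add i32 %x, 0" simplifies to %x.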
  if (Value *SimpleV = SimplifyInstruction(I, DL)) {
    // Add the users to the worklist. CAREFUL: an instruction can use itself,
    // in the case of a phi node.
    for (User *U : I->users()) {
      if (U != I) {
        WorkList.insert(cast<Instruction>(U));
      }
    }

    // Replace the instruction with its simplified value.
    bool Changed = false;
    if (!I->use_empty()) {
      I->replaceAllUsesWith(SimpleV);
      Changed = true;
    }
    if (isInstructionTriviallyDead(I, TLI)) {
      I->eraseFromParent();
      Changed = true;
    }
    return Changed;
  }
  return false;
}

/// SimplifyInstructionsInBlock - Scan the specified basic block and try to
/// simplify any instructions in it and recursively delete dead instructions.
///
/// This returns true if it changed the code.  Note that it can delete
/// instructions in other blocks as well as in this block.
bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB,
                                       const TargetLibraryInfo *TLI) {
  bool MadeChange = false;
  const DataLayout &DL = BB->getModule()->getDataLayout();

#ifndef NDEBUG
  // In debug builds, ensure that the terminator of the block is never replaced
  // or deleted by these simplifications. The idea of simplification is that it
  // cannot introduce new instructions, and there is no way to replace the
  // terminator of a block without introducing a new instruction.
  AssertingVH<Instruction> TerminatorVH(&BB->back());
#endif

  SmallSetVector<Instruction *, 16> WorkList;
  // Iterate over the original function, only adding insts to the worklist
  // if they actually need to be revisited. This avoids having to pre-init
  // the worklist with the entire function's worth of instructions.
  for (BasicBlock::iterator BI = BB->begin(), E = std::prev(BB->end());
       BI != E;) {
    assert(!BI->isTerminator());
    Instruction *I = &*BI;
    ++BI;

    // We're visiting this instruction now, so make sure it's not in the
    // worklist from an earlier visit.
    if (!WorkList.count(I))
      MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
  }

  while (!WorkList.empty()) {
    Instruction *I = WorkList.pop_back_val();
    MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
  }
  return MadeChange;
}

//===----------------------------------------------------------------------===//
//  Control Flow Graph Restructuring.
//


/// RemovePredecessorAndSimplify - Like BasicBlock::removePredecessor, this
/// method is called when we're about to delete Pred as a predecessor of BB.  If
/// BB contains any PHI nodes, this drops the entries in the PHI nodes for Pred.
///
/// Unlike the removePredecessor method, this attempts to simplify uses of PHI
/// nodes that collapse into identity values.  For example, if we have:
///   x = phi(1, 0, 0, 0)
///   y = and x, z
///
/// .. and delete the predecessor corresponding to the '1', this will attempt to
/// recursively fold the and to 0.
void llvm::RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred) {
  // This only adjusts blocks with PHI nodes.
  if (!isa<PHINode>(BB->begin()))
    return;

  // Remove the entries for Pred from the PHI nodes in BB, but do not simplify
  // them down.  This will leave us with single entry phi nodes and other phis
  // that can be removed.
  BB->removePredecessor(Pred, true);

  WeakVH PhiIt = &BB->front();
  while (PHINode *PN = dyn_cast<PHINode>(PhiIt)) {
    PhiIt = &*++BasicBlock::iterator(cast<Instruction>(PhiIt));
    Value *OldPhiIt = PhiIt;

    if (!recursivelySimplifyInstruction(PN))
      continue;

    // If recursive simplification ended up deleting the next PHI node we would
    // iterate to, then our iterator is invalid, restart scanning from the top
    // of the block.
    if (PhiIt != OldPhiIt) PhiIt = &BB->front();
  }
}


/// MergeBasicBlockIntoOnlyPred - DestBB is a block with one predecessor and its
/// predecessor is known to have one successor (DestBB!).  Eliminate the edge
/// between them, moving the instructions in the predecessor into DestBB and
/// deleting the predecessor block.
///
void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB, DominatorTree *DT) {
  // If BB has single-entry PHI nodes, fold them.
  while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
    Value *NewVal = PN->getIncomingValue(0);
    // Replace self referencing PHI with undef, it must be dead.
    if (NewVal == PN) NewVal = UndefValue::get(PN->getType());
    PN->replaceAllUsesWith(NewVal);
    PN->eraseFromParent();
  }

  BasicBlock *PredBB = DestBB->getSinglePredecessor();
  assert(PredBB && "Block doesn't have a single predecessor!");

  // Zap anything that took the address of DestBB.  Not doing this will give the
  // address an invalid value.
  if (DestBB->hasAddressTaken()) {
    BlockAddress *BA = BlockAddress::get(DestBB);
    Constant *Replacement =
      ConstantInt::get(llvm::Type::getInt32Ty(BA->getContext()), 1);
    BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
                                                     BA->getType()));
    BA->destroyConstant();
  }

  // Anything that branched to PredBB now branches to DestBB.
  PredBB->replaceAllUsesWith(DestBB);

  // Splice all the instructions from PredBB to DestBB.
  PredBB->getTerminator()->eraseFromParent();
  DestBB->getInstList().splice(DestBB->begin(), PredBB->getInstList());

  // If the PredBB is the entry block of the function, move DestBB up to
  // become the entry block after we erase PredBB.
  if (PredBB == &DestBB->getParent()->getEntryBlock())
    DestBB->moveAfter(PredBB);

  if (DT) {
    BasicBlock *PredBBIDom = DT->getNode(PredBB)->getIDom()->getBlock();
    DT->changeImmediateDominator(DestBB, PredBBIDom);
    DT->eraseNode(PredBB);
  }
  // Nuke PredBB.
  PredBB->eraseFromParent();
}

/// CanMergeValues - Return true if we can choose one of these values to use
/// in place of the other. Note that we will always choose the non-undef
/// value to keep.
static bool CanMergeValues(Value *First, Value *Second) {
  return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second);
}

/// CanPropagatePredecessorsForPHIs - Return true if we can fold BB, an
/// almost-empty BB ending in an unconditional branch to Succ, into Succ.
///
/// Assumption: Succ is the single successor for BB.
///
static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
  assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");

  DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
        << Succ->getName() << "\n");
  // Shortcut: if there is only a single predecessor, it must be BB and merging
  // is always safe.
  if (Succ->getSinglePredecessor()) return true;

  // Make a list of the predecessors of BB
  SmallPtrSet<BasicBlock*, 16> BBPreds(pred_begin(BB), pred_end(BB));

  // Look at all the phi nodes in Succ, to see if they present a conflict when
  // merging these blocks
  for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);

    // If the incoming value from BB is again a PHINode in
    // BB which has the same incoming value for *PI as PN does, we can
    // merge the phi nodes and then the blocks can still be merged
    PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB));
    if (BBPN && BBPN->getParent() == BB) {
      for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
        BasicBlock *IBB = PN->getIncomingBlock(PI);
        if (BBPreds.count(IBB) &&
            !CanMergeValues(BBPN->getIncomingValueForBlock(IBB),
                            PN->getIncomingValue(PI))) {
          DEBUG(dbgs() << "Can't fold, phi node " << PN->getName() << " in "
                << Succ->getName() << " is conflicting with "
                << BBPN->getName() << " with regard to common predecessor "
                << IBB->getName() << "\n");
          return false;
        }
      }
    } else {
      Value* Val = PN->getIncomingValueForBlock(BB);
      for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
        // See if the incoming value for the common predecessor is equal to the
        // one for BB, in which case this phi node will not prevent the merging
        // of the block.
        BasicBlock *IBB = PN->getIncomingBlock(PI);
        if (BBPreds.count(IBB) &&
            !CanMergeValues(Val, PN->getIncomingValue(PI))) {
          DEBUG(dbgs() << "Can't fold, phi node " << PN->getName() << " in "
                << Succ->getName() << " is conflicting with regard to common "
                << "predecessor " << IBB->getName() << "\n");
          return false;
        }
      }
    }
  }

  return true;
}

typedef SmallVector<BasicBlock *, 16> PredBlockVector;
typedef DenseMap<BasicBlock *, Value *> IncomingValueMap;

/// \brief Determines the value to use as the phi node input for a block.
///
/// Select between \p OldVal and any value that we know flows from \p BB
/// to a particular phi on the basis of which one (if either) is not
/// undef. Update IncomingValues based on the selected value.
///
/// \param OldVal The value we are considering selecting.
/// \param BB The block that the value flows in from.
/// \param IncomingValues A map from block-to-value for other phi inputs
/// that we have examined.
///
/// \returns the selected value.
static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB,
                                          IncomingValueMap &IncomingValues) {
  if (!isa<UndefValue>(OldVal)) {
    assert((!IncomingValues.count(BB) ||
            IncomingValues.find(BB)->second == OldVal) &&
           "Expected OldVal to match incoming value from BB!");

    IncomingValues.insert(std::make_pair(BB, OldVal));
    return OldVal;
  }

  IncomingValueMap::const_iterator It = IncomingValues.find(BB);
  if (It != IncomingValues.end()) return It->second;

  return OldVal;
}

/// \brief Create a map from block to value for the operands of a
/// given phi.
///
/// Create a map from block to value for each non-undef value flowing
/// into \p PN.
///
/// \param PN The phi we are collecting the map for.
/// \param IncomingValues [out] The map from block to value for this phi.
static void gatherIncomingValuesToPhi(PHINode *PN,
                                      IncomingValueMap &IncomingValues) {
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *BB = PN->getIncomingBlock(i);
    Value *V = PN->getIncomingValue(i);

    if (!isa<UndefValue>(V))
      IncomingValues.insert(std::make_pair(BB, V));
  }
}

/// \brief Replace the incoming undef values to a phi with the values
/// from a block-to-value map.
///
/// \param PN The phi we are replacing the undefs in.
/// \param IncomingValues A map from block to value.
static void replaceUndefValuesInPhi(PHINode *PN,
                                    const IncomingValueMap &IncomingValues) {
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);

    if (!isa<UndefValue>(V)) continue;

    BasicBlock *BB = PN->getIncomingBlock(i);
    IncomingValueMap::const_iterator It = IncomingValues.find(BB);
    if (It == IncomingValues.end()) continue;

    PN->setIncomingValue(i, It->second);
  }
}

/// \brief Replace a value flowing from a block to a phi with
/// potentially multiple instances of that value flowing from the
/// block's predecessors to the phi.
///
/// \param BB The block with the value flowing into the phi.
/// \param BBPreds The predecessors of BB.
/// \param PN The phi that we are updating.
static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB,
                                                const PredBlockVector &BBPreds,
                                                PHINode *PN) {
  Value *OldVal = PN->removeIncomingValue(BB, false);
  assert(OldVal && "No entry in PHI for Pred BB!");

  IncomingValueMap IncomingValues;

  // We are merging two blocks - BB, and the block containing PN - and
  // as a result we need to redirect edges from the predecessors of BB
  // to go to the block containing PN, and update PN
  // accordingly. Since we allow merging blocks in the case where the
  // predecessor and successor blocks both share some predecessors,
  // and where some of those common predecessors might have undef
  // values flowing into PN, we want to rewrite those values to be
  // consistent with the non-undef values.

  gatherIncomingValuesToPhi(PN, IncomingValues);

  // If this incoming value is one of the PHI nodes in BB, the new entries
  // in the PHI node are the entries from the old PHI.
  if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
    PHINode *OldValPN = cast<PHINode>(OldVal);
    for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
      // Note that, since we are merging phi nodes and BB and Succ might
      // have common predecessors, we could end up with a phi node with
      // identical incoming branches. This will be cleaned up later (and
      // will trigger asserts if we try to clean it up now, without also
      // simplifying the corresponding conditional branch).
      BasicBlock *PredBB = OldValPN->getIncomingBlock(i);
      Value *PredVal = OldValPN->getIncomingValue(i);
      Value *Selected = selectIncomingValueForBlock(PredVal, PredBB,
                                                    IncomingValues);

      // And add a new incoming value for this predecessor for the
      // newly retargeted branch.
      PN->addIncoming(Selected, PredBB);
    }
  } else {
    for (unsigned i = 0, e = BBPreds.size(); i != e; ++i) {
      // Update existing incoming values in PN for this
      // predecessor of BB.
      BasicBlock *PredBB = BBPreds[i];
      Value *Selected = selectIncomingValueForBlock(OldVal, PredBB,
                                                    IncomingValues);

      // And add a new incoming value for this predecessor for the
      // newly retargeted branch.
      PN->addIncoming(Selected, PredBB);
    }
  }

  replaceUndefValuesInPhi(PN, IncomingValues);
}

/// TryToSimplifyUncondBranchFromEmptyBlock - BB is known to contain an
/// unconditional branch, and contains no instructions other than PHI nodes,
/// possibly side-effect-free intrinsics and the branch.  If possible,
/// eliminate BB by rewriting all the predecessors to branch to the successor
/// block and return true.  If we can't transform, return false.
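///
/// For example (illustrative): a block containing only "br label %succ" can be
/// removed, with each of BB's predecessors retargeted to branch directly to
/// %succ.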
bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB) {
  assert(BB != &BB->getParent()->getEntryBlock() &&
         "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");

  // We can't eliminate infinite loops.
  BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
  if (BB == Succ) return false;

  // Check to see if merging these blocks would cause conflicts for any of the
  // phi nodes in BB or Succ. If not, we can safely merge.
  if (!CanPropagatePredecessorsForPHIs(BB, Succ)) return false;

  // Check for cases where Succ has multiple predecessors and a PHI node in BB
  // has uses which will not disappear when the PHI nodes are merged.  It is
  // possible to handle such cases, but difficult: it requires checking whether
  // BB dominates Succ, which is non-trivial to calculate in the case where
  // Succ has multiple predecessors.  Also, it requires checking whether
  // constructing the necessary self-referential PHI node doesn't introduce any
  // conflicts; this isn't too difficult, but the previous code for doing this
  // was incorrect.
  //
  // Note that if this check finds a live use, BB dominates Succ, so BB is
  // something like a loop pre-header (or rarely, a part of an irreducible CFG);
  // folding the branch isn't profitable in that case anyway.
  if (!Succ->getSinglePredecessor()) {
    BasicBlock::iterator BBI = BB->begin();
    while (isa<PHINode>(*BBI)) {
      for (Use &U : BBI->uses()) {
        if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) {
          if (PN->getIncomingBlock(U) != BB)
            return false;
        } else {
          return false;
        }
      }
      ++BBI;
    }
  }

  DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);

  if (isa<PHINode>(Succ->begin())) {
    // If there is more than one pred of succ, and there are PHI nodes in
    // the successor, then we need to add incoming edges for the PHI nodes
    //
    const PredBlockVector BBPreds(pred_begin(BB), pred_end(BB));

    // Loop over all of the PHI nodes in the successor of BB.
    for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
      PHINode *PN = cast<PHINode>(I);

      redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN);
    }
  }

  if (Succ->getSinglePredecessor()) {
    // BB is the only predecessor of Succ, so Succ will end up with exactly
    // the same predecessors BB had.

    // Copy over any phi, debug or lifetime instruction.
    BB->getTerminator()->eraseFromParent();
    Succ->getInstList().splice(Succ->getFirstNonPHI()->getIterator(),
                               BB->getInstList());
  } else {
    while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
      // We explicitly check for such uses in CanPropagatePredecessorsForPHIs.
      assert(PN->use_empty() && "There shouldn't be any uses here!");
      PN->eraseFromParent();
    }
  }

  // Everything that jumped to BB now goes to Succ.
  BB->replaceAllUsesWith(Succ);
  if (!Succ->hasName()) Succ->takeName(BB);
  BB->eraseFromParent();              // Delete the old basic block.
  return true;
}

/// EliminateDuplicatePHINodes - Check for and eliminate duplicate PHI
/// nodes in this block. This doesn't try to be clever about PHI nodes
/// which differ only in the order of the incoming values, but instcombine
/// orders them so it usually won't matter.
///
bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
  // This implementation doesn't currently consider undef operands
  // specially. Theoretically, two phis which are identical except for
  // one having an undef where the other doesn't could be collapsed.

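  // For example (illustrative), these two phis are duplicates, and the second
  // can be replaced by the first:
  //   %p1 = phi i32 [ %a, %bb0 ], [ %b, %bb1 ]
  //   %p2 = phi i32 [ %a, %bb0 ], [ %b, %bb1 ]
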
  struct PHIDenseMapInfo {
    static PHINode *getEmptyKey() {
      return DenseMapInfo<PHINode *>::getEmptyKey();
    }
    static PHINode *getTombstoneKey() {
      return DenseMapInfo<PHINode *>::getTombstoneKey();
    }
    static unsigned getHashValue(PHINode *PN) {
      // Compute a hash value on the operands. Instcombine will likely have
      // sorted them, which helps expose duplicates, but we have to check all
      // the operands to be safe in case instcombine hasn't run.
      return static_cast<unsigned>(hash_combine(
          hash_combine_range(PN->value_op_begin(), PN->value_op_end()),
          hash_combine_range(PN->block_begin(), PN->block_end())));
    }
    static bool isEqual(PHINode *LHS, PHINode *RHS) {
      if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
          RHS == getEmptyKey() || RHS == getTombstoneKey())
        return LHS == RHS;
      return LHS->isIdenticalTo(RHS);
    }
  };

  // Set of unique PHINodes.
  DenseSet<PHINode *, PHIDenseMapInfo> PHISet;

  // Examine each PHI.
  bool Changed = false;
  for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I++);) {
    auto Inserted = PHISet.insert(PN);
    if (!Inserted.second) {
      // A duplicate. Replace this PHI with its duplicate.
      PN->replaceAllUsesWith(*Inserted.first);
      PN->eraseFromParent();
      Changed = true;

      // The RAUW can change PHIs that we already visited. Start over from the
      // beginning.
      PHISet.clear();
      I = BB->begin();
    }
  }

  return Changed;
}

/// enforceKnownAlignment - If the specified pointer points to an object that
/// we control, modify the object's alignment to PrefAlign. This isn't
/// often possible though. If alignment is important, a more reliable approach
/// is to simply align all global variables and allocation instructions to
/// their preferred alignment from the beginning.
///
static unsigned enforceKnownAlignment(Value *V, unsigned Align,
                                      unsigned PrefAlign,
                                      const DataLayout &DL) {
  assert(PrefAlign > Align);

  V = V->stripPointerCasts();

  if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    // TODO: ideally, computeKnownBits ought to have used
    // AllocaInst::getAlignment() in its computation already, making
    // the below max redundant. But, as it turns out,
    // stripPointerCasts recurses through infinite layers of bitcasts,
    // while computeKnownBits is not allowed to traverse more than 6
    // levels.
    Align = std::max(AI->getAlignment(), Align);
    if (PrefAlign <= Align)
      return Align;

    // If the preferred alignment is greater than the natural stack alignment
    // then don't round up. This avoids dynamic stack realignment.
    if (DL.exceedsNaturalStackAlignment(PrefAlign))
      return Align;
    AI->setAlignment(PrefAlign);
    return PrefAlign;
  }

  if (auto *GO = dyn_cast<GlobalObject>(V)) {
    // TODO: as above, this shouldn't be necessary.
    Align = std::max(GO->getAlignment(), Align);
    if (PrefAlign <= Align)
      return Align;

    // If there is a large requested alignment and we can, bump up the alignment
    // of the global.  If the memory we set aside for the global may not be the
    // memory used by the final program then it is impossible for us to reliably
    // enforce the preferred alignment.
    if (!GO->canIncreaseAlignment())
      return Align;

    GO->setAlignment(PrefAlign);
    return PrefAlign;
  }

  return Align;
}

/// getOrEnforceKnownAlignment - If the specified pointer has an alignment that
/// we can determine, return it; otherwise return 0.  If PrefAlign is specified
/// and it is more than the alignment of the ultimate object, see if we can
/// increase the alignment of the ultimate object, making this check succeed.
unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
                                          const DataLayout &DL,
                                          const Instruction *CxtI,
                                          AssumptionCache *AC,
                                          const DominatorTree *DT) {
  assert(V->getType()->isPointerTy() &&
         "getOrEnforceKnownAlignment expects a pointer!");
  unsigned BitWidth = DL.getPointerTypeSizeInBits(V->getType());

  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  computeKnownBits(V, KnownZero, KnownOne, DL, 0, AC, CxtI, DT);
  unsigned TrailZ = KnownZero.countTrailingOnes();

  // Avoid trouble with ridiculously large TrailZ values, such as
  // those computed from a null pointer.
  TrailZ = std::min(TrailZ, unsigned(sizeof(unsigned) * CHAR_BIT - 1));

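  // For example (illustrative), a pointer whose four low bits are known to be
  // zero is at least 16-byte aligned.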
  unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);

  // LLVM doesn't support alignments larger than this currently.
  Align = std::min(Align, +Value::MaximumAlignment);

  if (PrefAlign > Align)
    Align = enforceKnownAlignment(V, Align, PrefAlign, DL);

  // Either the alignment was enforced above or no adjustment is needed.
  return Align;
}

//===----------------------------------------------------------------------===//
//  Dbg Intrinsic utilities
//

/// See if there is a dbg.value intrinsic for DIVar before I.
static bool LdStHasDebugValue(DILocalVariable *DIVar, DIExpression *DIExpr,
                              Instruction *I) {
  // Since we can't guarantee that the original dbg.declare intrinsic
  // is removed by LowerDbgDeclare(), we need to make sure that we are
  // not inserting the same dbg.value intrinsic over and over.
  llvm::BasicBlock::InstListType::iterator PrevI(I);
  if (PrevI != I->getParent()->getInstList().begin()) {
    --PrevI;
    if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(PrevI))
      if (DVI->getValue() == I->getOperand(0) &&
          DVI->getOffset() == 0 &&
          DVI->getVariable() == DIVar &&
          DVI->getExpression() == DIExpr)
        return true;
  }
  return false;
}

/// Inserts an llvm.dbg.value intrinsic before a store to an alloca'd value
/// that has an associated llvm.dbg.declare intrinsic.
bool llvm::ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
                                           StoreInst *SI, DIBuilder &Builder) {
  auto *DIVar = DDI->getVariable();
  auto *DIExpr = DDI->getExpression();
  assert(DIVar && "Missing variable");

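  // Illustrative rewrite: for an alloca %x.addr described by a dbg.declare,
  // a store such as
  //   store i32 %v, i32* %x.addr
  // gets a preceding
  //   call void @llvm.dbg.value(metadata i32 %v, i64 0, metadata !var,
  //                             metadata !expr)
  // so that the value %v is tracked directly.
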
  // If an argument is zero- or sign-extended then use the argument directly;
  // the extension may be zapped by an optimization pass in the future.
  Argument *ExtendedArg = nullptr;
  if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0)))
    ExtendedArg = dyn_cast<Argument>(ZExt->getOperand(0));
  if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0)))
    ExtendedArg = dyn_cast<Argument>(SExt->getOperand(0));
  if (ExtendedArg) {
    // We're now only describing a subset of the variable. The piece we're
    // describing will always be smaller than the variable size, because
    // VariableSize == Size of Alloca described by DDI. Since SI stores
    // to the alloca described by DDI, if its first operand is an extend,
    // we're guaranteed that before extension, the value was narrower than
    // the size of the alloca, hence the size of the described variable.
    SmallVector<uint64_t, 3> Ops;
    unsigned PieceOffset = 0;
    // If this already is a bit piece, we drop the bit piece from the expression
    // and record the offset.
    if (DIExpr->isBitPiece()) {
      Ops.append(DIExpr->elements_begin(), DIExpr->elements_end()-3);
      PieceOffset = DIExpr->getBitPieceOffset();
    } else {
      Ops.append(DIExpr->elements_begin(), DIExpr->elements_end());
    }
    Ops.push_back(dwarf::DW_OP_bit_piece);
    Ops.push_back(PieceOffset); // Offset
    const DataLayout &DL = DDI->getModule()->getDataLayout();
    Ops.push_back(DL.getTypeSizeInBits(ExtendedArg->getType())); // Size
    auto NewDIExpr = Builder.createExpression(Ops);
    if (!LdStHasDebugValue(DIVar, NewDIExpr, SI))
      Builder.insertDbgValueIntrinsic(ExtendedArg, 0, DIVar, NewDIExpr,
                                      DDI->getDebugLoc(), SI);
  } else if (!LdStHasDebugValue(DIVar, DIExpr, SI))
    Builder.insertDbgValueIntrinsic(SI->getOperand(0), 0, DIVar, DIExpr,
                                    DDI->getDebugLoc(), SI);
  return true;
}

/// Inserts an llvm.dbg.value intrinsic before a load of an alloca'd value
/// that has an associated llvm.dbg.declare intrinsic.
bool llvm::ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
                                           LoadInst *LI, DIBuilder &Builder) {
  auto *DIVar = DDI->getVariable();
  auto *DIExpr = DDI->getExpression();
  assert(DIVar && "Missing variable");

  if (LdStHasDebugValue(DIVar, DIExpr, LI))
    return true;

  // We are now tracking the loaded value instead of the address. In the
  // future, if multi-location support is added to the IR, it might be
  // preferable to keep tracking both the loaded value and the original
  // address in case the alloca cannot be elided.
  Instruction *DbgValue = Builder.insertDbgValueIntrinsic(
      LI, 0, DIVar, DIExpr, DDI->getDebugLoc(), (Instruction *)nullptr);
  DbgValue->insertAfter(LI);
  return true;
}

/// Determine whether this alloca is either a VLA or an array.
static bool isArray(AllocaInst *AI) {
  return AI->isArrayAllocation() ||
    AI->getType()->getElementType()->isArrayTy();
}

/// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into an appropriate
/// set of llvm.dbg.value intrinsics.
bool llvm::LowerDbgDeclare(Function &F) {
  DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false);
  SmallVector<DbgDeclareInst *, 4> Dbgs;
  for (auto &FI : F)
    for (Instruction &BI : FI)
      if (auto DDI = dyn_cast<DbgDeclareInst>(&BI))
        Dbgs.push_back(DDI);

  if (Dbgs.empty())
    return false;

  for (auto &I : Dbgs) {
    DbgDeclareInst *DDI = I;
    AllocaInst *AI = dyn_cast_or_null<AllocaInst>(DDI->getAddress());
    // If this is an alloca for a scalar variable, insert a dbg.value
    // at each load and store to the alloca and erase the dbg.declare.
    // The dbg.values allow tracking a variable even if it is not
    // stored on the stack, while the dbg.declare can only describe
    // the stack slot (and at a lexical-scope granularity). Later
    // passes will attempt to elide the stack slot.
    if (AI && !isArray(AI)) {
      for (auto &AIUse : AI->uses()) {
        User *U = AIUse.getUser();
        if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
          if (AIUse.getOperandNo() == 1)
            ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
        } else if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
          ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
        } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
          // This is a call by-value or some other instruction that
          // takes a pointer to the variable. Insert a *value*
          // intrinsic that describes the alloca.
          SmallVector<uint64_t, 1> NewDIExpr;
          auto *DIExpr = DDI->getExpression();
          NewDIExpr.push_back(dwarf::DW_OP_deref);
          NewDIExpr.append(DIExpr->elements_begin(), DIExpr->elements_end());
          DIB.insertDbgValueIntrinsic(AI, 0, DDI->getVariable(),
                                      DIB.createExpression(NewDIExpr),
                                      DDI->getDebugLoc(), CI);
        }
      }
      DDI->eraseFromParent();
    }
  }
  return true;
}

/// FindAllocaDbgDeclare - Finds the llvm.dbg.declare intrinsic describing the
/// alloca 'V', if any.
DbgDeclareInst *llvm::FindAllocaDbgDeclare(Value *V) {
  if (auto *L = LocalAsMetadata::getIfExists(V))
    if (auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L))
      for (User *U : MDV->users())
        if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U))
          return DDI;

  return nullptr;
}

static void DIExprAddDeref(SmallVectorImpl<uint64_t> &Expr) {
  Expr.push_back(dwarf::DW_OP_deref);
}

static void DIExprAddOffset(SmallVectorImpl<uint64_t> &Expr, int Offset) {
  if (Offset > 0) {
    Expr.push_back(dwarf::DW_OP_plus);
    Expr.push_back(Offset);
  } else if (Offset < 0) {
    Expr.push_back(dwarf::DW_OP_minus);
    Expr.push_back(-Offset);
  }
}
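
// For example (illustrative), DIExprAddOffset(Expr, -8) appends
// {DW_OP_minus, 8} to Expr, while an offset of zero appends nothing.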

static DIExpression *BuildReplacementDIExpr(DIBuilder &Builder,
                                            DIExpression *DIExpr, bool Deref,
                                            int Offset) {
  if (!Deref && !Offset)
    return DIExpr;
  // Create a copy of the original DIDescriptor for the user variable,
  // prepending a "deref" operation to the list of address elements, since the
  // new llvm.dbg.declare will take a value storing the address of the
  // variable's memory rather than the alloca itself.
  SmallVector<uint64_t, 4> NewDIExpr;
  if (Deref)
    DIExprAddDeref(NewDIExpr);
  DIExprAddOffset(NewDIExpr, Offset);
  if (DIExpr)
    NewDIExpr.append(DIExpr->elements_begin(), DIExpr->elements_end());
  return Builder.createExpression(NewDIExpr);
}

bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress,
                             Instruction *InsertBefore, DIBuilder &Builder,
                             bool Deref, int Offset) {
  DbgDeclareInst *DDI = FindAllocaDbgDeclare(Address);
  if (!DDI)
    return false;
  DebugLoc Loc = DDI->getDebugLoc();
  auto *DIVar = DDI->getVariable();
  auto *DIExpr = DDI->getExpression();
  assert(DIVar && "Missing variable");

  DIExpr = BuildReplacementDIExpr(Builder, DIExpr, Deref, Offset);

  // Insert llvm.dbg.declare immediately after the original alloca, and remove
  // old llvm.dbg.declare.
  Builder.insertDeclare(NewAddress, DIVar, DIExpr, Loc, InsertBefore);
  DDI->eraseFromParent();
  return true;
}

bool llvm::replaceDbgDeclareForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
                                      DIBuilder &Builder, bool Deref, int Offset) {
  return replaceDbgDeclare(AI, NewAllocaAddress, AI->getNextNode(), Builder,
                           Deref, Offset);
}

static void replaceOneDbgValueForAlloca(DbgValueInst *DVI, Value *NewAddress,
                                        DIBuilder &Builder, int Offset) {
  DebugLoc Loc = DVI->getDebugLoc();
  auto *DIVar = DVI->getVariable();
  auto *DIExpr = DVI->getExpression();
  assert(DIVar && "Missing variable");

  // This is an alloca-based llvm.dbg.value. The first thing it should do with
  // the alloca pointer is dereference it. Otherwise we don't know how to handle
  // it and give up.
  if (!DIExpr || DIExpr->getNumElements() < 1 ||
      DIExpr->getElement(0) != dwarf::DW_OP_deref)
    return;

  // Insert the offset immediately after the first deref.
  // We could just change the offset argument of dbg.value, but it's unsigned...
  if (Offset) {
    SmallVector<uint64_t, 4> NewDIExpr;
    DIExprAddDeref(NewDIExpr);
    DIExprAddOffset(NewDIExpr, Offset);
    NewDIExpr.append(DIExpr->elements_begin() + 1, DIExpr->elements_end());
    DIExpr = Builder.createExpression(NewDIExpr);
  }

  Builder.insertDbgValueIntrinsic(NewAddress, DVI->getOffset(), DIVar, DIExpr,
                                  Loc, DVI);
  DVI->eraseFromParent();
}

void llvm::replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
                                    DIBuilder &Builder, int Offset) {
  if (auto *L = LocalAsMetadata::getIfExists(AI))
    if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L))
      for (auto UI = MDV->use_begin(), UE = MDV->use_end(); UI != UE;) {
        Use &U = *UI++;
        if (auto *DVI = dyn_cast<DbgValueInst>(U.getUser()))
          replaceOneDbgValueForAlloca(DVI, NewAllocaAddress, Builder, Offset);
      }
}

unsigned llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) {
  unsigned NumDeadInst = 0;
  // Delete the instructions backwards, as it has a reduced likelihood of
  // having to update as many def-use and use-def chains.
  Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
  while (EndInst != &BB->front()) {
    // Delete the next to last instruction.
    Instruction *Inst = &*--EndInst->getIterator();
    if (!Inst->use_empty() && !Inst->getType()->isTokenTy())
      Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
    if (Inst->isEHPad() || Inst->getType()->isTokenTy()) {
      EndInst = Inst;
      continue;
    }
    if (!isa<DbgInfoIntrinsic>(Inst))
      ++NumDeadInst;
    Inst->eraseFromParent();
  }
  return NumDeadInst;
}

unsigned llvm::changeToUnreachable(Instruction *I, bool UseLLVMTrap) {
  BasicBlock *BB = I->getParent();
  // Loop over all of the successors, removing BB's entry from any PHI
  // nodes.
  for (BasicBlock *Successor : successors(BB))
    Successor->removePredecessor(BB);

  // Insert a call to llvm.trap right before this.  This turns the undefined
  // behavior into a hard fail instead of falling through into random code.
  if (UseLLVMTrap) {
    Function *TrapFn =
      Intrinsic::getDeclaration(BB->getParent()->getParent(), Intrinsic::trap);
    CallInst *CallTrap = CallInst::Create(TrapFn, "", I);
    CallTrap->setDebugLoc(I->getDebugLoc());
  }
  new UnreachableInst(I->getContext(), I);

  // All instructions after this are dead.
  unsigned NumInstrsRemoved = 0;
  BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end();
  while (BBI != BBE) {
    if (!BBI->use_empty())
      BBI->replaceAllUsesWith(UndefValue::get(BBI->getType()));
    BB->getInstList().erase(BBI++);
    ++NumInstrsRemoved;
  }
  return NumInstrsRemoved;
}

/// changeToCall - Convert the specified invoke into a normal call.
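/// For example (illustrative):
///   invoke void @f() to label %normal unwind label %lpad
/// becomes:
///   call void @f()
///   br label %normal
/// and %lpad loses this block as a predecessor.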
1343static void changeToCall(InvokeInst *II) {
1344  SmallVector<Value*, 8> Args(II->arg_begin(), II->arg_end());
1345  SmallVector<OperandBundleDef, 1> OpBundles;
1346  II->getOperandBundlesAsDefs(OpBundles);
1347  CallInst *NewCall = CallInst::Create(II->getCalledValue(), Args, OpBundles,
1348                                       "", II);
1349  NewCall->takeName(II);
1350  NewCall->setCallingConv(II->getCallingConv());
1351  NewCall->setAttributes(II->getAttributes());
1352  NewCall->setDebugLoc(II->getDebugLoc());
1353  II->replaceAllUsesWith(NewCall);
1354
1355  // Follow the call by a branch to the normal destination.
1356  BranchInst::Create(II->getNormalDest(), II);
1357
1358  // Update PHI nodes in the unwind destination
1359  II->getUnwindDest()->removePredecessor(II->getParent());
1360  II->eraseFromParent();
1361}
1362
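/// Walk the CFG from the entry block, recording every reachable block in
/// \p Reachable. Along the way, turn obviously unreachable code (false
/// assumptions, calls through null, stores to null/undef, no-return calls)
/// into 'unreachable' and simplify invokes and catchswitches where possible.
/// Returns true if anything was changed.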
1363static bool markAliveBlocks(Function &F,
1364                            SmallPtrSetImpl<BasicBlock*> &Reachable) {
1365
1366  SmallVector<BasicBlock*, 128> Worklist;
1367  BasicBlock *BB = &F.front();
1368  Worklist.push_back(BB);
1369  Reachable.insert(BB);
1370  bool Changed = false;
1371  do {
1372    BB = Worklist.pop_back_val();
1373
1374    // Do a quick scan of the basic block, turning any obviously unreachable
1375    // instructions into LLVM unreachable insts.  The instruction combining pass
1376    // canonicalizes unreachable insts into stores to null or undef.
1377    for (Instruction &I : *BB) {
1378      // Assumptions that are known to be false are equivalent to unreachable.
      // Also, if the condition is undefined, we make the choice that is most
      // beneficial to the optimizer and treat it as unreachable as well.
1381      if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
1382        if (II->getIntrinsicID() == Intrinsic::assume) {
1383          if (match(II->getArgOperand(0), m_CombineOr(m_Zero(), m_Undef()))) {
1384            // Don't insert a call to llvm.trap right before the unreachable.
1385            changeToUnreachable(II, false);
1386            Changed = true;
1387            break;
1388          }
1389        }
1390
1391        if (II->getIntrinsicID() == Intrinsic::experimental_guard) {
1392          // A call to the guard intrinsic bails out of the current compilation
1393          // unit if the predicate passed to it is false.  If the predicate is a
1394          // constant false, then we know the guard will bail out of the current
1395          // compile unconditionally, so all code following it is dead.
1396          //
1397          // Note: unlike in llvm.assume, it is not "obviously profitable" for
1398          // guards to treat `undef` as `false` since a guard on `undef` can
1399          // still be useful for widening.
1400          if (match(II->getArgOperand(0), m_Zero()))
1401            if (!isa<UnreachableInst>(II->getNextNode())) {
1402              changeToUnreachable(II->getNextNode(), /*UseLLVMTrap=*/ false);
1403              Changed = true;
1404              break;
1405            }
1406        }
1407      }
1408
1409      if (auto *CI = dyn_cast<CallInst>(&I)) {
1410        Value *Callee = CI->getCalledValue();
1411        if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
1412          changeToUnreachable(CI, /*UseLLVMTrap=*/false);
1413          Changed = true;
1414          break;
1415        }
1416        if (CI->doesNotReturn()) {
1417          // If we found a call to a no-return function, insert an unreachable
1418          // instruction after it.  Make sure there isn't *already* one there
1419          // though.
1420          if (!isa<UnreachableInst>(CI->getNextNode())) {
1421            // Don't insert a call to llvm.trap right before the unreachable.
1422            changeToUnreachable(CI->getNextNode(), false);
1423            Changed = true;
1424          }
1425          break;
1426        }
1427      }
1428
1429      // Store to undef and store to null are undefined and used to signal that
1430      // they should be changed to unreachable by passes that can't modify the
1431      // CFG.
1432      if (auto *SI = dyn_cast<StoreInst>(&I)) {
1433        // Don't touch volatile stores.
1434        if (SI->isVolatile()) continue;
1435
1436        Value *Ptr = SI->getOperand(1);
1437
1438        if (isa<UndefValue>(Ptr) ||
1439            (isa<ConstantPointerNull>(Ptr) &&
1440             SI->getPointerAddressSpace() == 0)) {
1441          changeToUnreachable(SI, true);
1442          Changed = true;
1443          break;
1444        }
1445      }
1446    }
1447
1448    TerminatorInst *Terminator = BB->getTerminator();
1449    if (auto *II = dyn_cast<InvokeInst>(Terminator)) {
1450      // Turn invokes that call 'nounwind' functions into ordinary calls.
1451      Value *Callee = II->getCalledValue();
1452      if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
1453        changeToUnreachable(II, true);
1454        Changed = true;
1455      } else if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) {
1456        if (II->use_empty() && II->onlyReadsMemory()) {
          // The invoke has no uses and no side effects; just branch to the
          // normal destination.
1458          BranchInst::Create(II->getNormalDest(), II);
1459          II->getUnwindDest()->removePredecessor(II->getParent());
1460          II->eraseFromParent();
1461        } else
1462          changeToCall(II);
1463        Changed = true;
1464      }
1465    } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Terminator)) {
      // Remove catchpad handlers that can never be reached because an
      // identical handler appears earlier in the catchswitch.
1467      struct CatchPadDenseMapInfo {
1468        static CatchPadInst *getEmptyKey() {
1469          return DenseMapInfo<CatchPadInst *>::getEmptyKey();
1470        }
1471        static CatchPadInst *getTombstoneKey() {
1472          return DenseMapInfo<CatchPadInst *>::getTombstoneKey();
1473        }
1474        static unsigned getHashValue(CatchPadInst *CatchPad) {
1475          return static_cast<unsigned>(hash_combine_range(
1476              CatchPad->value_op_begin(), CatchPad->value_op_end()));
1477        }
1478        static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) {
1479          if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
1480              RHS == getEmptyKey() || RHS == getTombstoneKey())
1481            return LHS == RHS;
1482          return LHS->isIdenticalTo(RHS);
1483        }
1484      };
1485
1486      // Set of unique CatchPads.
1487      SmallDenseMap<CatchPadInst *, detail::DenseSetEmpty, 4,
1488                    CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>>
1489          HandlerSet;
1490      detail::DenseSetEmpty Empty;
1491      for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(),
1492                                             E = CatchSwitch->handler_end();
1493           I != E; ++I) {
1494        BasicBlock *HandlerBB = *I;
1495        auto *CatchPad = cast<CatchPadInst>(HandlerBB->getFirstNonPHI());
1496        if (!HandlerSet.insert({CatchPad, Empty}).second) {
1497          CatchSwitch->removeHandler(I);
1498          --I;
1499          --E;
1500          Changed = true;
1501        }
1502      }
1503    }
1504
1505    Changed |= ConstantFoldTerminator(BB, true);
1506    for (BasicBlock *Successor : successors(BB))
1507      if (Reachable.insert(Successor).second)
1508        Worklist.push_back(Successor);
1509  } while (!Worklist.empty());
1510  return Changed;
1511}
1512
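/// Remove the unwind edge from \p BB's terminator: an invoke becomes a call,
/// a cleanupret is rewritten to unwind to the caller, and a catchswitch is
/// rewritten without an unwind destination.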
1513void llvm::removeUnwindEdge(BasicBlock *BB) {
1514  TerminatorInst *TI = BB->getTerminator();
1515
1516  if (auto *II = dyn_cast<InvokeInst>(TI)) {
1517    changeToCall(II);
1518    return;
1519  }
1520
1521  TerminatorInst *NewTI;
1522  BasicBlock *UnwindDest;
1523
1524  if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
1525    NewTI = CleanupReturnInst::Create(CRI->getCleanupPad(), nullptr, CRI);
1526    UnwindDest = CRI->getUnwindDest();
1527  } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) {
1528    auto *NewCatchSwitch = CatchSwitchInst::Create(
1529        CatchSwitch->getParentPad(), nullptr, CatchSwitch->getNumHandlers(),
1530        CatchSwitch->getName(), CatchSwitch);
1531    for (BasicBlock *PadBB : CatchSwitch->handlers())
1532      NewCatchSwitch->addHandler(PadBB);
1533
1534    NewTI = NewCatchSwitch;
1535    UnwindDest = CatchSwitch->getUnwindDest();
1536  } else {
1537    llvm_unreachable("Could not find unwind successor");
1538  }
1539
1540  NewTI->takeName(TI);
1541  NewTI->setDebugLoc(TI->getDebugLoc());
1542  UnwindDest->removePredecessor(BB);
1543  TI->replaceAllUsesWith(NewTI);
1544  TI->eraseFromParent();
1545}
1546
/// removeUnreachableBlocks - Remove blocks that are not reachable, even
/// if they are in a dead cycle.  Return true if a change was made, false
/// otherwise.
1550bool llvm::removeUnreachableBlocks(Function &F, LazyValueInfo *LVI) {
1551  SmallPtrSet<BasicBlock*, 16> Reachable;
1552  bool Changed = markAliveBlocks(F, Reachable);
1553
  // If every block is reachable, there is nothing to do.
1555  if (Reachable.size() == F.size())
1556    return Changed;
1557
1558  assert(Reachable.size() < F.size());
1559  NumRemoved += F.size()-Reachable.size();
1560
1561  // Loop over all of the basic blocks that are not reachable, dropping all of
1562  // their internal references...
1563  for (Function::iterator BB = ++F.begin(), E = F.end(); BB != E; ++BB) {
1564    if (Reachable.count(&*BB))
1565      continue;
1566
1567    for (BasicBlock *Successor : successors(&*BB))
1568      if (Reachable.count(Successor))
1569        Successor->removePredecessor(&*BB);
1570    if (LVI)
1571      LVI->eraseBlock(&*BB);
1572    BB->dropAllReferences();
1573  }
1574
1575  for (Function::iterator I = ++F.begin(); I != F.end();)
1576    if (!Reachable.count(&*I))
1577      I = F.getBasicBlockList().erase(I);
1578    else
1579      ++I;
1580
1581  return true;
1582}
1583
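/// Combine the metadata of two instructions, keeping the result on \p K.
/// \p K is the instruction being preserved and \p J is the one being merged
/// away; only the metadata kinds listed in \p KnownIDs are retained, and for
/// each kind the most conservative combination of the two values is chosen
/// (e.g. the most generic TBAA tag, the intersection of noalias scopes).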
1584void llvm::combineMetadata(Instruction *K, const Instruction *J,
1585                           ArrayRef<unsigned> KnownIDs) {
1586  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
1587  K->dropUnknownNonDebugMetadata(KnownIDs);
1588  K->getAllMetadataOtherThanDebugLoc(Metadata);
1589  for (unsigned i = 0, n = Metadata.size(); i < n; ++i) {
1590    unsigned Kind = Metadata[i].first;
1591    MDNode *JMD = J->getMetadata(Kind);
1592    MDNode *KMD = Metadata[i].second;
1593
1594    switch (Kind) {
1595      default:
1596        K->setMetadata(Kind, nullptr); // Remove unknown metadata
1597        break;
1598      case LLVMContext::MD_dbg:
1599        llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
1600      case LLVMContext::MD_tbaa:
1601        K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
1602        break;
1603      case LLVMContext::MD_alias_scope:
1604        K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD));
1605        break;
1606      case LLVMContext::MD_noalias:
1607      case LLVMContext::MD_mem_parallel_loop_access:
1608        K->setMetadata(Kind, MDNode::intersect(JMD, KMD));
1609        break;
1610      case LLVMContext::MD_range:
1611        K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD));
1612        break;
1613      case LLVMContext::MD_fpmath:
1614        K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
1615        break;
1616      case LLVMContext::MD_invariant_load:
1617        // Only set the !invariant.load if it is present in both instructions.
1618        K->setMetadata(Kind, JMD);
1619        break;
1620      case LLVMContext::MD_nonnull:
1621        // Only set the !nonnull if it is present in both instructions.
1622        K->setMetadata(Kind, JMD);
1623        break;
1624      case LLVMContext::MD_invariant_group:
1625        // Preserve !invariant.group in K.
1626        break;
      case LLVMContext::MD_align:
      case LLVMContext::MD_dereferenceable:
      case LLVMContext::MD_dereferenceable_or_null:
        K->setMetadata(Kind,
          MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
        break;
1636    }
1637  }
1638  // Set !invariant.group from J if J has it. If both instructions have it
1639  // then we will just pick it from J - even when they are different.
  // Also make sure that K is a load or store - e.g. combining a bitcast with
  // a load could produce a bitcast with invariant.group metadata, which is
  // invalid.
1642  // FIXME: we should try to preserve both invariant.group md if they are
1643  // different, but right now instruction can only have one invariant.group.
1644  if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group))
1645    if (isa<LoadInst>(K) || isa<StoreInst>(K))
1646      K->setMetadata(LLVMContext::MD_invariant_group, JMD);
1647}
1648
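/// Replace every use of \p From with \p To if that use is dominated by the
/// given edge \p Root. Returns the number of uses that were replaced.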
1649unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
1650                                        DominatorTree &DT,
1651                                        const BasicBlockEdge &Root) {
1652  assert(From->getType() == To->getType());
1653
1654  unsigned Count = 0;
1655  for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
1656       UI != UE; ) {
1657    Use &U = *UI++;
1658    if (DT.dominates(Root, U)) {
1659      U.set(To);
1660      DEBUG(dbgs() << "Replace dominated use of '"
1661            << From->getName() << "' as "
1662            << *To << " in " << *U << "\n");
1663      ++Count;
1664    }
1665  }
1666  return Count;
1667}
1668
1669unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
1670                                        DominatorTree &DT,
1671                                        const BasicBlock *BB) {
1672  assert(From->getType() == To->getType());
1673
1674  unsigned Count = 0;
1675  for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
1676       UI != UE;) {
1677    Use &U = *UI++;
1678    auto *I = cast<Instruction>(U.getUser());
1679    if (DT.properlyDominates(BB, I->getParent())) {
1680      U.set(To);
1681      DEBUG(dbgs() << "Replace dominated use of '" << From->getName() << "' as "
1682                   << *To << " in " << *U << "\n");
1683      ++Count;
1684    }
1685  }
1686  return Count;
1687}
1688
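/// Return true if \p CS is known to be a GC leaf call, i.e. one that will
/// never require a GC safepoint: either the call site or the callee is
/// explicitly marked "gc-leaf-function", or the callee is an intrinsic other
/// than experimental_gc_statepoint and experimental_deoptimize.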
1689bool llvm::callsGCLeafFunction(ImmutableCallSite CS) {
1690  // Check if the function is specifically marked as a gc leaf function.
1691  if (CS.hasFnAttr("gc-leaf-function"))
1692    return true;
1693  if (const Function *F = CS.getCalledFunction()) {
1694    if (F->hasFnAttribute("gc-leaf-function"))
1695      return true;
1696
1697    if (auto IID = F->getIntrinsicID())
1698      // Most LLVM intrinsics do not take safepoints.
1699      return IID != Intrinsic::experimental_gc_statepoint &&
1700             IID != Intrinsic::experimental_deoptimize;
1701  }
1702
1703  return false;
1704}
1705
1706/// A potential constituent of a bitreverse or bswap expression. See
1707/// collectBitParts for a fuller explanation.
1708struct BitPart {
1709  BitPart(Value *P, unsigned BW) : Provider(P) {
1710    Provenance.resize(BW);
1711  }
1712
1713  /// The Value that this is a bitreverse/bswap of.
1714  Value *Provider;
1715  /// The "provenance" of each bit. Provenance[A] = B means that bit A
1716  /// in Provider becomes bit B in the result of this expression.
1717  SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128.
1718
1719  enum { Unset = -1 };
1720};
1721
1722/// Analyze the specified subexpression and see if it is capable of providing
1723/// pieces of a bswap or bitreverse. The subexpression provides a potential
1724/// piece of a bswap or bitreverse if it can be proven that each non-zero bit in
1725/// the output of the expression came from a corresponding bit in some other
1726/// value. This function is recursive, and the end result is a mapping of
1727/// bitnumber to bitnumber. It is the caller's responsibility to validate that
1728/// the bitnumber to bitnumber mapping is correct for a bswap or bitreverse.
1729///
/// For example, if the current subexpression is "(shl i32 %X, 24)" then we know
1731/// that the expression deposits the low byte of %X into the high byte of the
1732/// result and that all other bits are zero. This expression is accepted and a
1733/// BitPart is returned with Provider set to %X and Provenance[24-31] set to
1734/// [0-7].
1735///
1736/// To avoid revisiting values, the BitPart results are memoized into the
1737/// provided map. To avoid unnecessary copying of BitParts, BitParts are
1738/// constructed in-place in the \c BPS map. Because of this \c BPS needs to
/// store BitParts objects, not pointers. As we need the concept of a null
/// BitPart (the Value has been analyzed and the analysis failed), we use an
/// Optional type instead to provide the same functionality.
1742///
1743/// Because we pass around references into \c BPS, we must use a container that
1744/// does not invalidate internal references (std::map instead of DenseMap).
1745///
1746static const Optional<BitPart> &
1747collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals,
1748                std::map<Value *, Optional<BitPart>> &BPS) {
1749  auto I = BPS.find(V);
1750  if (I != BPS.end())
1751    return I->second;
1752
1753  auto &Result = BPS[V] = None;
1754  auto BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
1755
1756  if (Instruction *I = dyn_cast<Instruction>(V)) {
1757    // If this is an or instruction, it may be an inner node of the bswap.
1758    if (I->getOpcode() == Instruction::Or) {
1759      auto &A = collectBitParts(I->getOperand(0), MatchBSwaps,
1760                                MatchBitReversals, BPS);
1761      auto &B = collectBitParts(I->getOperand(1), MatchBSwaps,
1762                                MatchBitReversals, BPS);
1763      if (!A || !B)
1764        return Result;
1765
1766      // Try and merge the two together.
1767      if (!A->Provider || A->Provider != B->Provider)
1768        return Result;
1769
1770      Result = BitPart(A->Provider, BitWidth);
1771      for (unsigned i = 0; i < A->Provenance.size(); ++i) {
1772        if (A->Provenance[i] != BitPart::Unset &&
1773            B->Provenance[i] != BitPart::Unset &&
1774            A->Provenance[i] != B->Provenance[i])
1775          return Result = None;
1776
1777        if (A->Provenance[i] == BitPart::Unset)
1778          Result->Provenance[i] = B->Provenance[i];
1779        else
1780          Result->Provenance[i] = A->Provenance[i];
1781      }
1782
1783      return Result;
1784    }
1785
1786    // If this is a logical shift by a constant, recurse then shift the result.
1787    if (I->isLogicalShift() && isa<ConstantInt>(I->getOperand(1))) {
1788      unsigned BitShift =
1789          cast<ConstantInt>(I->getOperand(1))->getLimitedValue(~0U);
      // Ensure the shift amount is defined; in LLVM IR, shifting by an amount
      // greater than or equal to the bit width yields an undefined value.
      if (BitShift >= BitWidth)
1792        return Result;
1793
1794      auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps,
1795                                  MatchBitReversals, BPS);
1796      if (!Res)
1797        return Result;
1798      Result = Res;
1799
1800      // Perform the "shift" on BitProvenance.
1801      auto &P = Result->Provenance;
1802      if (I->getOpcode() == Instruction::Shl) {
1803        P.erase(std::prev(P.end(), BitShift), P.end());
1804        P.insert(P.begin(), BitShift, BitPart::Unset);
1805      } else {
1806        P.erase(P.begin(), std::next(P.begin(), BitShift));
1807        P.insert(P.end(), BitShift, BitPart::Unset);
1808      }
1809
1810      return Result;
1811    }
1812
1813    // If this is a logical 'and' with a mask that clears bits, recurse then
1814    // unset the appropriate bits.
1815    if (I->getOpcode() == Instruction::And &&
1816        isa<ConstantInt>(I->getOperand(1))) {
1817      APInt Bit(I->getType()->getPrimitiveSizeInBits(), 1);
1818      const APInt &AndMask = cast<ConstantInt>(I->getOperand(1))->getValue();
1819
      // Early exit: if we are only matching bswaps, the mask must cover a
      // multiple of 8 bits, since a bswap permutes whole bytes.
1822      unsigned NumMaskedBits = AndMask.countPopulation();
1823      if (!MatchBitReversals && NumMaskedBits % 8 != 0)
1824        return Result;
1825
1826      auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps,
1827                                  MatchBitReversals, BPS);
1828      if (!Res)
1829        return Result;
1830      Result = Res;
1831
1832      for (unsigned i = 0; i < BitWidth; ++i, Bit <<= 1)
1833        // If the AndMask is zero for this bit, clear the bit.
1834        if ((AndMask & Bit) == 0)
1835          Result->Provenance[i] = BitPart::Unset;
1836      return Result;
1837    }
1838
1839    // If this is a zext instruction zero extend the result.
1840    if (I->getOpcode() == Instruction::ZExt) {
1841      auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps,
1842                                  MatchBitReversals, BPS);
1843      if (!Res)
1844        return Result;
1845
1846      Result = BitPart(Res->Provider, BitWidth);
1847      auto NarrowBitWidth =
1848          cast<IntegerType>(cast<ZExtInst>(I)->getSrcTy())->getBitWidth();
1849      for (unsigned i = 0; i < NarrowBitWidth; ++i)
1850        Result->Provenance[i] = Res->Provenance[i];
1851      for (unsigned i = NarrowBitWidth; i < BitWidth; ++i)
1852        Result->Provenance[i] = BitPart::Unset;
1853      return Result;
1854    }
1855  }
1856
  // Okay, we got to something that isn't a shift, 'or', 'and' or zext. This
  // must be the input value to the bswap/bitreverse.
1859  Result = BitPart(V, BitWidth);
1860  for (unsigned i = 0; i < BitWidth; ++i)
1861    Result->Provenance[i] = i;
1862  return Result;
1863}
1864
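/// Check whether moving bit \p From to bit \p To is consistent with byte
/// swapping a \p BitWidth-bit value: the bit must keep its position within
/// its byte while the containing byte's position is mirrored. For example,
/// with BitWidth == 32, bit 1 (in byte 0) may only move to bit 25 (in
/// byte 3).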
1865static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To,
1866                                          unsigned BitWidth) {
1867  if (From % 8 != To % 8)
1868    return false;
1869  // Convert from bit indices to byte indices and check for a byte reversal.
1870  From >>= 3;
1871  To >>= 3;
1872  BitWidth >>= 3;
1873  return From == BitWidth - To - 1;
1874}
1875
1876static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To,
1877                                               unsigned BitWidth) {
1878  return From == BitWidth - To - 1;
1879}
1880
/// Given an OR instruction, check to see if this is a bswap or bitreverse
/// idiom. If so, insert the new intrinsic and return true.
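///
/// For example (illustrative IR for a 16-bit byte swap):
///   %hi = shl i16 %x, 8
///   %lo = lshr i16 %x, 8
///   %r  = or i16 %hi, %lo
/// is rewritten to a call to llvm.bswap.i16(%x).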
1883bool llvm::recognizeBSwapOrBitReverseIdiom(
1884    Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
1885    SmallVectorImpl<Instruction *> &InsertedInsts) {
1886  if (Operator::getOpcode(I) != Instruction::Or)
1887    return false;
1888  if (!MatchBSwaps && !MatchBitReversals)
1889    return false;
1890  IntegerType *ITy = dyn_cast<IntegerType>(I->getType());
1891  if (!ITy || ITy->getBitWidth() > 128)
1892    return false;   // Can't do vectors or integers > 128 bits.
1893  unsigned BW = ITy->getBitWidth();
1894
1895  unsigned DemandedBW = BW;
1896  IntegerType *DemandedTy = ITy;
1897  if (I->hasOneUse()) {
1898    if (TruncInst *Trunc = dyn_cast<TruncInst>(I->user_back())) {
1899      DemandedTy = cast<IntegerType>(Trunc->getType());
1900      DemandedBW = DemandedTy->getBitWidth();
1901    }
1902  }
1903
1904  // Try to find all the pieces corresponding to the bswap.
1905  std::map<Value *, Optional<BitPart>> BPS;
1906  auto Res = collectBitParts(I, MatchBSwaps, MatchBitReversals, BPS);
1907  if (!Res)
1908    return false;
1909  auto &BitProvenance = Res->Provenance;
1910
1911  // Now, is the bit permutation correct for a bswap or a bitreverse? We can
1912  // only byteswap values with an even number of bytes.
1913  bool OKForBSwap = DemandedBW % 16 == 0, OKForBitReverse = true;
1914  for (unsigned i = 0; i < DemandedBW; ++i) {
1915    OKForBSwap &=
1916        bitTransformIsCorrectForBSwap(BitProvenance[i], i, DemandedBW);
1917    OKForBitReverse &=
1918        bitTransformIsCorrectForBitReverse(BitProvenance[i], i, DemandedBW);
1919  }
1920
1921  Intrinsic::ID Intrin;
1922  if (OKForBSwap && MatchBSwaps)
1923    Intrin = Intrinsic::bswap;
1924  else if (OKForBitReverse && MatchBitReversals)
1925    Intrin = Intrinsic::bitreverse;
1926  else
1927    return false;
1928
1929  if (ITy != DemandedTy) {
1930    Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, DemandedTy);
1931    Value *Provider = Res->Provider;
1932    IntegerType *ProviderTy = cast<IntegerType>(Provider->getType());
1933    // We may need to truncate the provider.
1934    if (DemandedTy != ProviderTy) {
1935      auto *Trunc = CastInst::Create(Instruction::Trunc, Provider, DemandedTy,
1936                                     "trunc", I);
1937      InsertedInsts.push_back(Trunc);
1938      Provider = Trunc;
1939    }
1940    auto *CI = CallInst::Create(F, Provider, "rev", I);
1941    InsertedInsts.push_back(CI);
1942    auto *ExtInst = CastInst::Create(Instruction::ZExt, CI, ITy, "zext", I);
1943    InsertedInsts.push_back(ExtInst);
1944    return true;
1945  }
1946
1947  Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, ITy);
1948  InsertedInsts.push_back(CallInst::Create(F, Res->Provider, "rev", I));
1949  return true;
1950}
1951
1952// CodeGen has special handling for some string functions that may replace
1953// them with target-specific intrinsics.  Since that'd skip our interceptors
1954// in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses,
1955// we mark affected calls as NoBuiltin, which will disable optimization
1956// in CodeGen.
void llvm::maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI,
                                                  const TargetLibraryInfo *TLI) {
1959  Function *F = CI->getCalledFunction();
1960  LibFunc::Func Func;
1961  if (!F || F->hasLocalLinkage() || !F->hasName() ||
1962      !TLI->getLibFunc(F->getName(), Func))
1963    return;
1964  switch (Func) {
1965    default: break;
1966    case LibFunc::memcmp:
1967    case LibFunc::memchr:
1968    case LibFunc::strcpy:
1969    case LibFunc::stpcpy:
1970    case LibFunc::strcmp:
1971    case LibFunc::strlen:
1972    case LibFunc::strnlen:
1973      CI->addAttribute(AttributeSet::FunctionIndex, Attribute::NoBuiltin);
1974      break;
1975  }
1976}
1977