//===--- CGCleanup.cpp - Bookkeeping and code emission for cleanups -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains code dealing with the IR generation for cleanups
// and related information.
//
// A "cleanup" is a piece of code which needs to be executed whenever
// control transfers out of a particular scope.  This can be
// conditionalized to occur only on exceptional control flow, only on
// normal control flow, or both.
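//
// For example, in
//   void test() { std::string s; maybeThrow(); }
// the destructor call for 's' is a cleanup: it must run on the normal
// fall-through out of the block and, as an EH cleanup, on the
// exceptional edge out of the call.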
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCleanup.h"

using namespace clang;
using namespace CodeGen;

bool DominatingValue<RValue>::saved_type::needsSaving(RValue rv) {
  if (rv.isScalar())
    return DominatingLLVMValue::needsSaving(rv.getScalarVal());
  if (rv.isAggregate())
    return DominatingLLVMValue::needsSaving(rv.getAggregateAddr());
  return true;
}

DominatingValue<RValue>::saved_type
DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
  if (rv.isScalar()) {
    llvm::Value *V = rv.getScalarVal();

    // These automatically dominate and don't need to be saved.
    if (!DominatingLLVMValue::needsSaving(V))
      return saved_type(V, ScalarLiteral);

    // Everything else needs an alloca.
    llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
    CGF.Builder.CreateStore(V, addr);
    return saved_type(addr, ScalarAddress);
  }

  if (rv.isComplex()) {
    CodeGenFunction::ComplexPairTy V = rv.getComplexVal();
    llvm::Type *ComplexTy =
      llvm::StructType::get(V.first->getType(), V.second->getType(),
                            (void*) 0);
    llvm::Value *addr = CGF.CreateTempAlloca(ComplexTy, "saved-complex");
    CGF.Builder.CreateStore(V.first, CGF.Builder.CreateStructGEP(addr, 0));
    CGF.Builder.CreateStore(V.second, CGF.Builder.CreateStructGEP(addr, 1));
    return saved_type(addr, ComplexAddress);
  }

  assert(rv.isAggregate());
  llvm::Value *V = rv.getAggregateAddr(); // TODO: volatile?
  if (!DominatingLLVMValue::needsSaving(V))
    return saved_type(V, AggregateLiteral);

  llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
  CGF.Builder.CreateStore(V, addr);
  return saved_type(addr, AggregateAddress);
}
/// Given a saved r-value produced by the save method above, emit the
/// code necessary to restore it to usability at the current insertion
/// point.
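///
/// A typical client is a conditionally-emitted cleanup, which
/// (schematically) saves its operands when it is pushed and restores
/// them when the cleanup is finally emitted:
///   saved_type saved = saved_type::save(CGF, rv); // at the push point
///   ...
///   RValue rv = saved.restore(CGF);               // inside the cleanup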
RValue DominatingValue<RValue>::saved_type::restore(CodeGenFunction &CGF) {
  switch (K) {
  case ScalarLiteral:
    return RValue::get(Value);
  case ScalarAddress:
    return RValue::get(CGF.Builder.CreateLoad(Value));
  case AggregateLiteral:
    return RValue::getAggregate(Value);
  case AggregateAddress:
    return RValue::getAggregate(CGF.Builder.CreateLoad(Value));
  case ComplexAddress: {
    llvm::Value *real =
      CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(Value, 0));
    llvm::Value *imag =
      CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(Value, 1));
    return RValue::getComplex(real, imag);
  }
  }

  llvm_unreachable("bad saved r-value kind");
}

/// Push an entry of the given size onto this protected-scope stack.
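/// The scopes are laid out at the end of a single heap buffer, which
/// grows downward: StartOfData moves toward StartOfBuffer as entries
/// are pushed, so the innermost scope always begins at StartOfData.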
char *EHScopeStack::allocate(size_t Size) {
  if (!StartOfBuffer) {
    unsigned Capacity = 1024;
    while (Capacity < Size) Capacity *= 2;
    StartOfBuffer = new char[Capacity];
    StartOfData = EndOfBuffer = StartOfBuffer + Capacity;
  } else if (static_cast<size_t>(StartOfData - StartOfBuffer) < Size) {
    unsigned CurrentCapacity = EndOfBuffer - StartOfBuffer;
    unsigned UsedCapacity = CurrentCapacity - (StartOfData - StartOfBuffer);

    unsigned NewCapacity = CurrentCapacity;
    do {
      NewCapacity *= 2;
    } while (NewCapacity < UsedCapacity + Size);

    char *NewStartOfBuffer = new char[NewCapacity];
    char *NewEndOfBuffer = NewStartOfBuffer + NewCapacity;
    char *NewStartOfData = NewEndOfBuffer - UsedCapacity;
    memcpy(NewStartOfData, StartOfData, UsedCapacity);
    delete [] StartOfBuffer;
    StartOfBuffer = NewStartOfBuffer;
    EndOfBuffer = NewEndOfBuffer;
    StartOfData = NewStartOfData;
  }

  assert(StartOfBuffer + Size <= StartOfData);
  StartOfData -= Size;
  return StartOfData;
}

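/// Find the innermost normal cleanup that is still active, skipping
/// over any cleanups which have been deactivated.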
EHScopeStack::stable_iterator
EHScopeStack::getInnermostActiveNormalCleanup() const {
  for (stable_iterator si = getInnermostNormalCleanup(), se = stable_end();
         si != se; ) {
    EHCleanupScope &cleanup = cast<EHCleanupScope>(*find(si));
    if (cleanup.isActive()) return si;
    si = cleanup.getEnclosingNormalCleanup();
  }
  return stable_end();
}

EHScopeStack::stable_iterator EHScopeStack::getInnermostActiveEHScope() const {
  for (stable_iterator si = getInnermostEHScope(), se = stable_end();
         si != se; ) {
    // Skip over inactive cleanups.
    EHCleanupScope *cleanup = dyn_cast<EHCleanupScope>(&*find(si));
    if (cleanup && !cleanup->isActive()) {
      si = cleanup->getEnclosingEHScope();
      continue;
    }

    // All other scopes are always active.
    return si;
  }

  return stable_end();
}

void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) {
  assert(((Size % sizeof(void*)) == 0) && "cleanup type is misaligned");
  char *Buffer = allocate(EHCleanupScope::getSizeForCleanupSize(Size));
  bool IsNormalCleanup = Kind & NormalCleanup;
  bool IsEHCleanup = Kind & EHCleanup;
  bool IsActive = !(Kind & InactiveCleanup);
  EHCleanupScope *Scope =
    new (Buffer) EHCleanupScope(IsNormalCleanup,
                                IsEHCleanup,
                                IsActive,
                                Size,
                                BranchFixups.size(),
                                InnermostNormalCleanup,
                                InnermostEHScope);
  if (IsNormalCleanup)
    InnermostNormalCleanup = stable_begin();
  if (IsEHCleanup)
    InnermostEHScope = stable_begin();

  return Scope->getCleanupBuffer();
}

void EHScopeStack::popCleanup() {
  assert(!empty() && "popping exception stack when empty");

  assert(isa<EHCleanupScope>(*begin()));
  EHCleanupScope &Cleanup = cast<EHCleanupScope>(*begin());
  InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup();
  InnermostEHScope = Cleanup.getEnclosingEHScope();
  StartOfData += Cleanup.getAllocatedSize();

  // Destroy the cleanup.
  Cleanup.~EHCleanupScope();

  // Check whether we can shrink the branch-fixups stack.
  if (!BranchFixups.empty()) {
    // If we no longer have any normal cleanups, all the fixups are
    // complete.
    if (!hasNormalCleanups())
      BranchFixups.clear();

    // Otherwise we can still trim out unnecessary nulls.
    else
      popNullFixups();
  }
}

EHFilterScope *EHScopeStack::pushFilter(unsigned numFilters) {
  assert(getInnermostEHScope() == stable_end());
  char *buffer = allocate(EHFilterScope::getSizeForNumFilters(numFilters));
  EHFilterScope *filter = new (buffer) EHFilterScope(numFilters);
  InnermostEHScope = stable_begin();
  return filter;
}

void EHScopeStack::popFilter() {
  assert(!empty() && "popping exception stack when empty");

  EHFilterScope &filter = cast<EHFilterScope>(*begin());
  StartOfData += EHFilterScope::getSizeForNumFilters(filter.getNumFilters());

  InnermostEHScope = filter.getEnclosingEHScope();
}

EHCatchScope *EHScopeStack::pushCatch(unsigned numHandlers) {
  char *buffer = allocate(EHCatchScope::getSizeForNumHandlers(numHandlers));
  EHCatchScope *scope =
    new (buffer) EHCatchScope(numHandlers, InnermostEHScope);
  InnermostEHScope = stable_begin();
  return scope;
}

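/// Push a terminate scope.  While it is the innermost EH scope, any
/// exception that reaches it goes straight to the terminate handler.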
void EHScopeStack::pushTerminate() {
  char *Buffer = allocate(EHTerminateScope::getSize());
  new (Buffer) EHTerminateScope(InnermostEHScope);
  InnermostEHScope = stable_begin();
}

/// Remove any 'null' fixups on the stack.  However, we can't pop more
/// fixups than the fixup depth on the innermost normal cleanup, or
/// else fixups that we try to add to that cleanup will end up in the
/// wrong place.  We *could* try to shrink fixup depths, but that's
/// actually a lot of work for little benefit.
void EHScopeStack::popNullFixups() {
  // We expect this to only be called when there's still an innermost
  // normal cleanup;  otherwise there really shouldn't be any fixups.
  assert(hasNormalCleanups());

  EHScopeStack::iterator it = find(InnermostNormalCleanup);
  unsigned MinSize = cast<EHCleanupScope>(*it).getFixupDepth();
  assert(BranchFixups.size() >= MinSize && "fixup stack out of order");

  while (BranchFixups.size() > MinSize &&
         BranchFixups.back().Destination == 0)
    BranchFixups.pop_back();
}

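/// Set up an active flag for the cleanup on top of the EH stack.  This
/// is used when a full-expression cleanup is pushed inside a
/// conditionally-evaluated expression: the flag is false on paths which
/// never reach the cleanup's push point, and EmitCleanup tests it
/// before running the cleanup body.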
void CodeGenFunction::initFullExprCleanup() {
  // Create a variable to decide whether the cleanup needs to be run.
  llvm::AllocaInst *active
    = CreateTempAlloca(Builder.getInt1Ty(), "cleanup.cond");

  // Initialize it to false at a site that's guaranteed to be run
  // before each evaluation.
  setBeforeOutermostConditional(Builder.getFalse(), active);

  // Initialize it to true at the current location.
  Builder.CreateStore(Builder.getTrue(), active);

  // Set that as the active flag in the cleanup.
  EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin());
  assert(cleanup.getActiveFlag() == 0 && "cleanup already has active flag?");
  cleanup.setActiveFlag(active);

  if (cleanup.isNormalCleanup()) cleanup.setTestFlagInNormalCleanup();
  if (cleanup.isEHCleanup()) cleanup.setTestFlagInEHCleanup();
}

void EHScopeStack::Cleanup::anchor() {}

/// All the branch fixups on the EH stack have propagated out past the
/// outermost normal cleanup; resolve them all by adding cases to the
/// given switch instruction.
static void ResolveAllBranchFixups(CodeGenFunction &CGF,
                                   llvm::SwitchInst *Switch,
                                   llvm::BasicBlock *CleanupEntry) {
  llvm::SmallPtrSet<llvm::BasicBlock*, 4> CasesAdded;

  for (unsigned I = 0, E = CGF.EHStack.getNumBranchFixups(); I != E; ++I) {
    // Skip this fixup if its destination isn't set.
    BranchFixup &Fixup = CGF.EHStack.getBranchFixup(I);
    if (Fixup.Destination == 0) continue;

    // If there isn't an OptimisticBranchBlock, then InitialBranch is
    // still pointing directly to its destination; forward it to the
    // appropriate cleanup entry.  This is required in the specific
    // case of
    //   { std::string s; goto lbl; }
    //   lbl:
    // i.e. where there's an unresolved fixup inside a single cleanup
    // entry which we're currently popping.
    if (Fixup.OptimisticBranchBlock == 0) {
      new llvm::StoreInst(CGF.Builder.getInt32(Fixup.DestinationIndex),
                          CGF.getNormalCleanupDestSlot(),
                          Fixup.InitialBranch);
      Fixup.InitialBranch->setSuccessor(0, CleanupEntry);
    }

    // Don't add this case to the switch statement twice.
    if (!CasesAdded.insert(Fixup.Destination)) continue;

    Switch->addCase(CGF.Builder.getInt32(Fixup.DestinationIndex),
                    Fixup.Destination);
  }

  CGF.EHStack.clearFixups();
}

/// Transitions the terminator of the given exit-block of a cleanup to
/// be a cleanup switch.
static llvm::SwitchInst *TransitionToCleanupSwitch(CodeGenFunction &CGF,
                                                   llvm::BasicBlock *Block) {
  // If it's a branch, turn it into a switch whose default
  // destination is its original target.
  llvm::TerminatorInst *Term = Block->getTerminator();
  assert(Term && "can't transition block without terminator");

  if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
    assert(Br->isUnconditional());
    llvm::LoadInst *Load =
      new llvm::LoadInst(CGF.getNormalCleanupDestSlot(), "cleanup.dest", Term);
    llvm::SwitchInst *Switch =
      llvm::SwitchInst::Create(Load, Br->getSuccessor(0), 4, Block);
    Br->eraseFromParent();
    return Switch;
  } else {
    return cast<llvm::SwitchInst>(Term);
  }
}

void CodeGenFunction::ResolveBranchFixups(llvm::BasicBlock *Block) {
  assert(Block && "resolving a null target block");
  if (!EHStack.getNumBranchFixups()) return;

  assert(EHStack.hasNormalCleanups() &&
         "branch fixups exist with no normal cleanups on stack");

  llvm::SmallPtrSet<llvm::BasicBlock*, 4> ModifiedOptimisticBlocks;
  bool ResolvedAny = false;

  for (unsigned I = 0, E = EHStack.getNumBranchFixups(); I != E; ++I) {
    // Skip this fixup if its destination doesn't match.
    BranchFixup &Fixup = EHStack.getBranchFixup(I);
    if (Fixup.Destination != Block) continue;

    Fixup.Destination = 0;
    ResolvedAny = true;

    // If it doesn't have an optimistic branch block, InitialBranch is
    // already pointing to the right place.
    llvm::BasicBlock *BranchBB = Fixup.OptimisticBranchBlock;
    if (!BranchBB)
      continue;

    // Don't process the same optimistic branch block twice.
    if (!ModifiedOptimisticBlocks.insert(BranchBB))
      continue;

    llvm::SwitchInst *Switch = TransitionToCleanupSwitch(*this, BranchBB);

    // Add a case to the switch.
    Switch->addCase(Builder.getInt32(Fixup.DestinationIndex), Block);
  }

  if (ResolvedAny)
    EHStack.popNullFixups();
}

/// Pops cleanup blocks until the given savepoint is reached.
void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) {
  assert(Old.isValid());

  while (EHStack.stable_begin() != Old) {
    EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());

    // As long as Old strictly encloses the scope's enclosing normal
    // cleanup, we're going to emit another normal cleanup which
    // fallthrough can propagate through.
    bool FallThroughIsBranchThrough =
      Old.strictlyEncloses(Scope.getEnclosingNormalCleanup());

    PopCleanupBlock(FallThroughIsBranchThrough);
  }
}

static llvm::BasicBlock *CreateNormalEntry(CodeGenFunction &CGF,
                                           EHCleanupScope &Scope) {
  assert(Scope.isNormalCleanup());
  llvm::BasicBlock *Entry = Scope.getNormalBlock();
  if (!Entry) {
    Entry = CGF.createBasicBlock("cleanup");
    Scope.setNormalBlock(Entry);
  }
  return Entry;
}

/// Attempts to reduce a cleanup's entry block to a fallthrough.  This
/// is basically llvm::MergeBlockIntoPredecessor, except
/// simplified/optimized for the tighter constraints on cleanup blocks.
///
/// Returns the new block, whatever it is.
static llvm::BasicBlock *SimplifyCleanupEntry(CodeGenFunction &CGF,
                                              llvm::BasicBlock *Entry) {
  llvm::BasicBlock *Pred = Entry->getSinglePredecessor();
  if (!Pred) return Entry;

  llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator());
  if (!Br || Br->isConditional()) return Entry;
  assert(Br->getSuccessor(0) == Entry);

  // If we were previously inserting at the end of the cleanup entry
  // block, we'll need to continue inserting at the end of the
  // predecessor.
  bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry;
  assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end());

  // Kill the branch.
  Br->eraseFromParent();

  // Replace all uses of the entry with the predecessor, in case there
  // are phis in the cleanup.
  Entry->replaceAllUsesWith(Pred);

  // Merge the blocks.
  Pred->getInstList().splice(Pred->end(), Entry->getInstList());

  // Kill the entry block.
  Entry->eraseFromParent();

  if (WasInsertBlock)
    CGF.Builder.SetInsertPoint(Pred);

  return Pred;
}

static void EmitCleanup(CodeGenFunction &CGF,
                        EHScopeStack::Cleanup *Fn,
                        EHScopeStack::Cleanup::Flags flags,
                        llvm::Value *ActiveFlag) {
  // EH cleanups always occur within a terminate scope.
  if (flags.isForEHCleanup()) CGF.EHStack.pushTerminate();

  // If there's an active flag, load it and skip the cleanup if it's
  // false.
  llvm::BasicBlock *ContBB = 0;
  if (ActiveFlag) {
    ContBB = CGF.createBasicBlock("cleanup.done");
    llvm::BasicBlock *CleanupBB = CGF.createBasicBlock("cleanup.action");
    llvm::Value *IsActive
      = CGF.Builder.CreateLoad(ActiveFlag, "cleanup.is_active");
    CGF.Builder.CreateCondBr(IsActive, CleanupBB, ContBB);
    CGF.EmitBlock(CleanupBB);
  }

  // Ask the cleanup to emit itself.
  Fn->Emit(CGF, flags);
  assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?");

  // Emit the continuation block if there was an active flag.
  if (ActiveFlag)
    CGF.EmitBlock(ContBB);

  // Leave the terminate scope.
  if (flags.isForEHCleanup()) CGF.EHStack.popTerminate();
}

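/// Rewrite the terminator of the given cleanup exit block so that
/// edges which previously led to From lead to To instead.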
static void ForwardPrebranchedFallthrough(llvm::BasicBlock *Exit,
                                          llvm::BasicBlock *From,
                                          llvm::BasicBlock *To) {
  // Exit is the exit block of a cleanup, so it always terminates in
  // an unconditional branch or a switch.
  llvm::TerminatorInst *Term = Exit->getTerminator();

  if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
    assert(Br->isUnconditional() && Br->getSuccessor(0) == From);
    Br->setSuccessor(0, To);
  } else {
    llvm::SwitchInst *Switch = cast<llvm::SwitchInst>(Term);
    for (unsigned I = 0, E = Switch->getNumSuccessors(); I != E; ++I)
      if (Switch->getSuccessor(I) == From)
        Switch->setSuccessor(I, To);
  }
}

/// We don't need a normal entry block for the given cleanup.
/// Optimistic fixup branches can cause such a block to come into
/// existence anyway;  if so, destroy it.
///
/// The validity of this transformation is very much specific to the
/// exact ways in which we form branches to cleanup entries.
static void destroyOptimisticNormalEntry(CodeGenFunction &CGF,
                                         EHCleanupScope &scope) {
  llvm::BasicBlock *entry = scope.getNormalBlock();
  if (!entry) return;

  // Replace all the uses with unreachable.
  llvm::BasicBlock *unreachableBB = CGF.getUnreachableBlock();
  for (llvm::BasicBlock::use_iterator
         i = entry->use_begin(), e = entry->use_end(); i != e; ) {
    llvm::Use &use = i.getUse();
    ++i;

    use.set(unreachableBB);

    // The only uses should be fixup switches.
    llvm::SwitchInst *si = cast<llvm::SwitchInst>(use.getUser());
    if (si->getNumCases() == 1 && si->getDefaultDest() == unreachableBB) {
      // Replace the switch with a branch.
      llvm::BranchInst::Create(si->case_begin().getCaseSuccessor(), si);

      // The switch operand is a load from the cleanup-dest alloca.
      llvm::LoadInst *condition = cast<llvm::LoadInst>(si->getCondition());

      // Destroy the switch.
      si->eraseFromParent();

      // Destroy the load.
      assert(condition->getOperand(0) == CGF.NormalCleanupDest);
      assert(condition->use_empty());
      condition->eraseFromParent();
    }
  }

  assert(entry->use_empty());
  delete entry;
}

/// Pops a cleanup block.  If the block includes a normal cleanup, the
/// current insertion point is threaded through the cleanup, as are
/// any branch fixups on the cleanup.
void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
  assert(!EHStack.empty() && "cleanup stack is empty!");
  assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
  assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());

  // Remember activation information.
  bool IsActive = Scope.isActive();
  llvm::Value *NormalActiveFlag =
    Scope.shouldTestFlagInNormalCleanup() ? Scope.getActiveFlag() : 0;
  llvm::Value *EHActiveFlag =
    Scope.shouldTestFlagInEHCleanup() ? Scope.getActiveFlag() : 0;

  // Check whether we need an EH cleanup.  This is only true if we've
  // generated a lazy EH cleanup block.
  llvm::BasicBlock *EHEntry = Scope.getCachedEHDispatchBlock();
  assert(Scope.hasEHBranches() == (EHEntry != 0));
  bool RequiresEHCleanup = (EHEntry != 0);
  EHScopeStack::stable_iterator EHParent = Scope.getEnclosingEHScope();

  // Check the three conditions which might require a normal cleanup:

  // - whether there are branch fix-ups through this cleanup
  unsigned FixupDepth = Scope.getFixupDepth();
  bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth;

  // - whether there are branch-throughs or branch-afters
  bool HasExistingBranches = Scope.hasBranches();

  // - whether there's a fallthrough
  llvm::BasicBlock *FallthroughSource = Builder.GetInsertBlock();
  bool HasFallthrough = (FallthroughSource != 0 && IsActive);

  // Branch-through fall-throughs leave the insertion point set to the
  // end of the last cleanup, which points to the current scope.  The
  // rest of IR gen doesn't need to worry about this; it only happens
  // during the execution of PopCleanupBlocks().
  bool HasPrebranchedFallthrough =
    (FallthroughSource && FallthroughSource->getTerminator());

  // If this is a normal cleanup, then having a prebranched
  // fallthrough implies that the fallthrough source unconditionally
  // jumps here.
  assert(!Scope.isNormalCleanup() || !HasPrebranchedFallthrough ||
         (Scope.getNormalBlock() &&
          FallthroughSource->getTerminator()->getSuccessor(0)
            == Scope.getNormalBlock()));

  bool RequiresNormalCleanup = false;
  if (Scope.isNormalCleanup() &&
      (HasFixups || HasExistingBranches || HasFallthrough)) {
    RequiresNormalCleanup = true;
  }

  // If we have a prebranched fallthrough into an inactive normal
  // cleanup, rewrite it so that it leads to the appropriate place.
  if (Scope.isNormalCleanup() && HasPrebranchedFallthrough && !IsActive) {
    llvm::BasicBlock *prebranchDest;

    // If the prebranch is semantically branching through the next
    // cleanup, just forward it to the next block, leaving the
    // insertion point in the prebranched block.
    if (FallthroughIsBranchThrough) {
      EHScope &enclosing = *EHStack.find(Scope.getEnclosingNormalCleanup());
      prebranchDest = CreateNormalEntry(*this, cast<EHCleanupScope>(enclosing));

    // Otherwise, we need to make a new block.  If the normal cleanup
    // isn't being used at all, we could actually reuse the normal
    // entry block, but this is simpler, and it avoids conflicts with
    // dead optimistic fixup branches.
    } else {
      prebranchDest = createBasicBlock("forwarded-prebranch");
      EmitBlock(prebranchDest);
    }

    llvm::BasicBlock *normalEntry = Scope.getNormalBlock();
    assert(normalEntry && !normalEntry->use_empty());

    ForwardPrebranchedFallthrough(FallthroughSource,
                                  normalEntry, prebranchDest);
  }

  // If we don't need the cleanup at all, we're done.
  if (!RequiresNormalCleanup && !RequiresEHCleanup) {
    destroyOptimisticNormalEntry(*this, Scope);
    EHStack.popCleanup(); // safe because there are no fixups
    assert(EHStack.getNumBranchFixups() == 0 ||
           EHStack.hasNormalCleanups());
    return;
  }

  // Copy the cleanup emission data out.  Note that SmallVector
  // guarantees maximal alignment for its buffer regardless of its
  // type parameter.
  SmallVector<char, 8*sizeof(void*)> CleanupBuffer;
  CleanupBuffer.reserve(Scope.getCleanupSize());
  memcpy(CleanupBuffer.data(),
         Scope.getCleanupBuffer(), Scope.getCleanupSize());
  CleanupBuffer.set_size(Scope.getCleanupSize());
  EHScopeStack::Cleanup *Fn =
    reinterpret_cast<EHScopeStack::Cleanup*>(CleanupBuffer.data());

  EHScopeStack::Cleanup::Flags cleanupFlags;
  if (Scope.isNormalCleanup())
    cleanupFlags.setIsNormalCleanupKind();
  if (Scope.isEHCleanup())
    cleanupFlags.setIsEHCleanupKind();

  if (!RequiresNormalCleanup) {
    destroyOptimisticNormalEntry(*this, Scope);
    EHStack.popCleanup();
  } else {
    // If we have a fallthrough and no other need for the cleanup,
    // emit it directly.
    if (HasFallthrough && !HasPrebranchedFallthrough &&
        !HasFixups && !HasExistingBranches) {

      destroyOptimisticNormalEntry(*this, Scope);
      EHStack.popCleanup();

      EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);

    // Otherwise, the best approach is to thread everything through
    // the cleanup block and then try to clean up after ourselves.
    } else {
      // Force the entry block to exist.
      llvm::BasicBlock *NormalEntry = CreateNormalEntry(*this, Scope);

      // I.  Set up the fallthrough edge in.

      CGBuilderTy::InsertPoint savedInactiveFallthroughIP;

      // If there's a fallthrough, we need to store the cleanup
      // destination index.  For fall-throughs this is always zero.
      if (HasFallthrough) {
        if (!HasPrebranchedFallthrough)
          Builder.CreateStore(Builder.getInt32(0), getNormalCleanupDestSlot());

      // Otherwise, save and clear the IP if we don't have fallthrough
      // because the cleanup is inactive.
      } else if (FallthroughSource) {
        assert(!IsActive && "source without fallthrough for active cleanup");
        savedInactiveFallthroughIP = Builder.saveAndClearIP();
      }

      // II.  Emit the entry block.  This implicitly branches to it if
      // we have fallthrough.  All the fixups and existing branches
      // should already be branched to it.
      EmitBlock(NormalEntry);

      // III.  Figure out where we're going and build the cleanup
      // epilogue.

      bool HasEnclosingCleanups =
        (Scope.getEnclosingNormalCleanup() != EHStack.stable_end());

      // Compute the branch-through dest if we need it:
      //   - if there are branch-throughs threaded through the scope
      //   - if fall-through is a branch-through
      //   - if there are fixups that will be optimistically forwarded
      //     to the enclosing cleanup
      llvm::BasicBlock *BranchThroughDest = 0;
      if (Scope.hasBranchThroughs() ||
          (FallthroughSource && FallthroughIsBranchThrough) ||
          (HasFixups && HasEnclosingCleanups)) {
        assert(HasEnclosingCleanups);
        EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup());
        BranchThroughDest = CreateNormalEntry(*this, cast<EHCleanupScope>(S));
      }

      llvm::BasicBlock *FallthroughDest = 0;
      SmallVector<llvm::Instruction*, 2> InstsToAppend;

      // If there's exactly one branch-after and no other threads,
      // we can route it without a switch.
      if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough &&
          Scope.getNumBranchAfters() == 1) {
        assert(!BranchThroughDest || !IsActive);

        // TODO: clean up the possibly dead stores to the cleanup dest slot.
        llvm::BasicBlock *BranchAfter = Scope.getBranchAfterBlock(0);
        InstsToAppend.push_back(llvm::BranchInst::Create(BranchAfter));

      // Build a switch-out if we need it:
      //   - if there are branch-afters threaded through the scope
      //   - if fall-through is a branch-after
      //   - if there are fixups that have nowhere left to go and
      //     so must be immediately resolved
      } else if (Scope.getNumBranchAfters() ||
                 (HasFallthrough && !FallthroughIsBranchThrough) ||
                 (HasFixups && !HasEnclosingCleanups)) {

        llvm::BasicBlock *Default =
          (BranchThroughDest ? BranchThroughDest : getUnreachableBlock());

        // TODO: base this on the number of branch-afters and fixups
        const unsigned SwitchCapacity = 10;

        llvm::LoadInst *Load =
          new llvm::LoadInst(getNormalCleanupDestSlot(), "cleanup.dest");
        llvm::SwitchInst *Switch =
          llvm::SwitchInst::Create(Load, Default, SwitchCapacity);

        InstsToAppend.push_back(Load);
        InstsToAppend.push_back(Switch);

        // Branch-after fallthrough.
        if (FallthroughSource && !FallthroughIsBranchThrough) {
          FallthroughDest = createBasicBlock("cleanup.cont");
          if (HasFallthrough)
            Switch->addCase(Builder.getInt32(0), FallthroughDest);
        }

        for (unsigned I = 0, E = Scope.getNumBranchAfters(); I != E; ++I) {
          Switch->addCase(Scope.getBranchAfterIndex(I),
                          Scope.getBranchAfterBlock(I));
        }

        // If there aren't any enclosing cleanups, we can resolve all
        // the fixups now.
        if (HasFixups && !HasEnclosingCleanups)
          ResolveAllBranchFixups(*this, Switch, NormalEntry);
      } else {
        // We should always have a branch-through destination in this case.
        assert(BranchThroughDest);
        InstsToAppend.push_back(llvm::BranchInst::Create(BranchThroughDest));
      }

      // IV.  Pop the cleanup and emit it.
      EHStack.popCleanup();
      assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups);

      EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);

      // Append the prepared cleanup epilogue from above.
      llvm::BasicBlock *NormalExit = Builder.GetInsertBlock();
      for (unsigned I = 0, E = InstsToAppend.size(); I != E; ++I)
        NormalExit->getInstList().push_back(InstsToAppend[I]);

      // Optimistically hope that any fixups will continue falling through.
      for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
           I < E; ++I) {
        BranchFixup &Fixup = EHStack.getBranchFixup(I);
        if (!Fixup.Destination) continue;
        if (!Fixup.OptimisticBranchBlock) {
          new llvm::StoreInst(Builder.getInt32(Fixup.DestinationIndex),
                              getNormalCleanupDestSlot(),
                              Fixup.InitialBranch);
          Fixup.InitialBranch->setSuccessor(0, NormalEntry);
        }
        Fixup.OptimisticBranchBlock = NormalExit;
      }

      // V.  Set up the fallthrough edge out.

      // Case 1: a fallthrough source exists but doesn't branch to the
      // cleanup because the cleanup is inactive.
      if (!HasFallthrough && FallthroughSource) {
        // Prebranched fallthrough was forwarded earlier.
        // Non-prebranched fallthrough doesn't need to be forwarded.
        // Either way, all we need to do is restore the IP we cleared before.
        assert(!IsActive);
        Builder.restoreIP(savedInactiveFallthroughIP);

      // Case 2: a fallthrough source exists and should branch to the
      // cleanup, but we're not supposed to branch through to the next
      // cleanup.
      } else if (HasFallthrough && FallthroughDest) {
        assert(!FallthroughIsBranchThrough);
        EmitBlock(FallthroughDest);

      // Case 3: a fallthrough source exists and should branch to the
      // cleanup and then through to the next.
      } else if (HasFallthrough) {
        // Everything is already set up for this.

      // Case 4: no fallthrough source exists.
      } else {
        Builder.ClearInsertionPoint();
      }

      // VI.  Assorted cleaning.

      // Check whether we can merge NormalEntry into a single predecessor.
      // This might invalidate (non-IR) pointers to NormalEntry.
      llvm::BasicBlock *NewNormalEntry =
        SimplifyCleanupEntry(*this, NormalEntry);

      // If it did invalidate those pointers, and NormalEntry was the same
      // as NormalExit, go back and patch up the fixups.
      if (NewNormalEntry != NormalEntry && NormalEntry == NormalExit)
        for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
               I < E; ++I)
          EHStack.getBranchFixup(I).OptimisticBranchBlock = NewNormalEntry;
    }
  }

  assert(EHStack.hasNormalCleanups() || EHStack.getNumBranchFixups() == 0);

  // Emit the EH cleanup if required.
  if (RequiresEHCleanup) {
    CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();

    EmitBlock(EHEntry);

    // We only actually emit the cleanup code if the cleanup is either
    // active or was used before it was deactivated.
    if (EHActiveFlag || IsActive) {
      cleanupFlags.setIsForEHCleanup();
      EmitCleanup(*this, Fn, cleanupFlags, EHActiveFlag);
    }

    Builder.CreateBr(getEHDispatchBlock(EHParent));

    Builder.restoreIP(SavedIP);

    SimplifyCleanupEntry(*this, EHEntry);
  }
}

/// isObviouslyBranchWithoutCleanups - Return true if a branch to the
/// specified destination obviously has no cleanups to run.  'false' is always
/// a conservatively correct answer for this method.
bool CodeGenFunction::isObviouslyBranchWithoutCleanups(JumpDest Dest) const {
  assert(Dest.getScopeDepth().encloses(EHStack.stable_begin())
         && "stale jump destination");

  // Calculate the innermost active normal cleanup.
  EHScopeStack::stable_iterator TopCleanup =
    EHStack.getInnermostActiveNormalCleanup();

  // If we're not in an active normal cleanup scope, or if the
  // destination scope is within the innermost active normal cleanup
  // scope, we don't need to worry about fixups.
  if (TopCleanup == EHStack.stable_end() ||
      TopCleanup.encloses(Dest.getScopeDepth())) // works for invalid
    return true;

  // Otherwise, we might need some cleanups.
  return false;
}

/// Terminate the current block by emitting a branch which might leave
/// the current cleanup-protected scope.  The target scope may not yet
/// be known, in which case this will require a fixup.
///
/// As a side-effect, this method clears the insertion point.
void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
  assert(Dest.getScopeDepth().encloses(EHStack.stable_begin())
         && "stale jump destination");

  if (!HaveInsertPoint())
    return;

  // Create the branch.
  llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());

  // Calculate the innermost active normal cleanup.
  EHScopeStack::stable_iterator
    TopCleanup = EHStack.getInnermostActiveNormalCleanup();

  // If we're not in an active normal cleanup scope, or if the
  // destination scope is within the innermost active normal cleanup
  // scope, we don't need to worry about fixups.
  if (TopCleanup == EHStack.stable_end() ||
      TopCleanup.encloses(Dest.getScopeDepth())) { // works for invalid
    Builder.ClearInsertionPoint();
    return;
  }

  // If we can't resolve the destination cleanup scope, just add this
  // to the current cleanup scope as a branch fixup.
  if (!Dest.getScopeDepth().isValid()) {
    BranchFixup &Fixup = EHStack.addBranchFixup();
    Fixup.Destination = Dest.getBlock();
    Fixup.DestinationIndex = Dest.getDestIndex();
    Fixup.InitialBranch = BI;
    Fixup.OptimisticBranchBlock = 0;

    Builder.ClearInsertionPoint();
    return;
  }

  // Otherwise, thread through all the normal cleanups in scope.

  // Store the index at the start.
  llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
  new llvm::StoreInst(Index, getNormalCleanupDestSlot(), BI);

  // Adjust BI to point to the first cleanup block.
  {
    EHCleanupScope &Scope =
      cast<EHCleanupScope>(*EHStack.find(TopCleanup));
    BI->setSuccessor(0, CreateNormalEntry(*this, Scope));
  }

  // Add this destination to all the scopes involved.
  EHScopeStack::stable_iterator I = TopCleanup;
  EHScopeStack::stable_iterator E = Dest.getScopeDepth();
  if (E.strictlyEncloses(I)) {
    while (true) {
      EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
      assert(Scope.isNormalCleanup());
      I = Scope.getEnclosingNormalCleanup();

      // If this is the last cleanup we're propagating through, tell it
      // that there's a resolved jump moving through it.
      if (!E.strictlyEncloses(I)) {
        Scope.addBranchAfter(Index, Dest.getBlock());
        break;
      }

      // Otherwise, tell the scope that there's a jump propagating
      // through it.  If this isn't new information, all the rest of
      // the work has been done before.
      if (!Scope.addBranchThrough(Dest.getBlock()))
        break;
    }
  }

  Builder.ClearInsertionPoint();
}

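/// Check whether the given cleanup, or any cleanup it encloses, has
/// ever needed a normal entry block.  If so, changes to its activation
/// state have to be tracked in a runtime flag.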
static bool IsUsedAsNormalCleanup(EHScopeStack &EHStack,
                                  EHScopeStack::stable_iterator C) {
  // If we needed a normal block for any reason, that counts.
  if (cast<EHCleanupScope>(*EHStack.find(C)).getNormalBlock())
    return true;

  // Check whether any enclosed cleanups were needed.
  for (EHScopeStack::stable_iterator
         I = EHStack.getInnermostNormalCleanup();
         I != C; ) {
    assert(C.strictlyEncloses(I));
    EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
    if (S.getNormalBlock()) return true;
    I = S.getEnclosingNormalCleanup();
  }

  return false;
}

static bool IsUsedAsEHCleanup(EHScopeStack &EHStack,
                              EHScopeStack::stable_iterator cleanup) {
  // If we needed an EH block for any reason, that counts.
  if (EHStack.find(cleanup)->hasEHBranches())
    return true;

  // Check whether any enclosed cleanups were needed.
  for (EHScopeStack::stable_iterator
         i = EHStack.getInnermostEHScope(); i != cleanup; ) {
    assert(cleanup.strictlyEncloses(i));

    EHScope &scope = *EHStack.find(i);
    if (scope.hasEHBranches())
      return true;

    i = scope.getEnclosingEHScope();
  }

  return false;
}

enum ForActivation_t {
  ForActivation,
  ForDeactivation
};

/// The given cleanup block is changing activation state.  Configure a
/// cleanup variable if necessary.
///
/// It would be good if we had some way of determining if there were
/// extra uses *after* the change-over point.
static void SetupCleanupBlockActivation(CodeGenFunction &CGF,
                                        EHScopeStack::stable_iterator C,
                                        ForActivation_t kind,
                                        llvm::Instruction *dominatingIP) {
  EHCleanupScope &Scope = cast<EHCleanupScope>(*CGF.EHStack.find(C));

  // We always need the flag if we're activating the cleanup in a
  // conditional context, because we have to assume that the current
  // location doesn't necessarily dominate the cleanup's code.
  bool isActivatedInConditional =
    (kind == ForActivation && CGF.isInConditionalBranch());

  bool needFlag = false;

  // Calculate whether the cleanup was used:

  //   - as a normal cleanup
  if (Scope.isNormalCleanup() &&
      (isActivatedInConditional || IsUsedAsNormalCleanup(CGF.EHStack, C))) {
    Scope.setTestFlagInNormalCleanup();
    needFlag = true;
  }

  //  - as an EH cleanup
  if (Scope.isEHCleanup() &&
      (isActivatedInConditional || IsUsedAsEHCleanup(CGF.EHStack, C))) {
    Scope.setTestFlagInEHCleanup();
    needFlag = true;
  }

  // If it hasn't yet been used as either, we're done.
  if (!needFlag) return;

  llvm::AllocaInst *var = Scope.getActiveFlag();
  if (!var) {
    var = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), "cleanup.isactive");
    Scope.setActiveFlag(var);

    assert(dominatingIP && "no existing variable and no dominating IP!");

    // Initialize to true or false depending on whether it was
    // active up to this point.
    llvm::Value *value = CGF.Builder.getInt1(kind == ForDeactivation);

    // If we're in a conditional block, ignore the dominating IP and
    // use the outermost conditional branch.
    if (CGF.isInConditionalBranch()) {
      CGF.setBeforeOutermostConditional(value, var);
    } else {
      new llvm::StoreInst(value, var, dominatingIP);
    }
  }

  CGF.Builder.CreateStore(CGF.Builder.getInt1(kind == ForActivation), var);
}

/// Activate a cleanup that was created in an inactive state.
void CodeGenFunction::ActivateCleanupBlock(EHScopeStack::stable_iterator C,
                                           llvm::Instruction *dominatingIP) {
  assert(C != EHStack.stable_end() && "activating bottom of stack?");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
  assert(!Scope.isActive() && "double activation");

  SetupCleanupBlockActivation(*this, C, ForActivation, dominatingIP);

  Scope.setActive(true);
}

/// Deactivate a cleanup that was created in an active state.
void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C,
                                             llvm::Instruction *dominatingIP) {
  assert(C != EHStack.stable_end() && "deactivating bottom of stack?");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
  assert(Scope.isActive() && "double deactivation");

  // If it's the top of the stack, just pop it.
  if (C == EHStack.stable_begin()) {
    // If it's a normal cleanup, we need to pretend that the
    // fallthrough is unreachable.
    CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
    PopCleanupBlock();
    Builder.restoreIP(SavedIP);
    return;
  }

  // Otherwise, follow the general case.
  SetupCleanupBlockActivation(*this, C, ForDeactivation, dominatingIP);

  Scope.setActive(false);
}

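/// Lazily create the i32 slot which branch sources store a destination
/// index to, and which shared cleanup exits load from to decide where
/// to branch next.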
llvm::Value *CodeGenFunction::getNormalCleanupDestSlot() {
  if (!NormalCleanupDest)
    NormalCleanupDest =
      CreateTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot");
  return NormalCleanupDest;
}

/// Emits all the code to cause the given temporary to be cleaned up.
void CodeGenFunction::EmitCXXTemporary(const CXXTemporary *Temporary,
                                       QualType TempType,
                                       llvm::Value *Ptr) {
  pushDestroy(NormalAndEHCleanup, Ptr, TempType, destroyCXXObject,
              /*useEHCleanup*/ true);
}