//===--- CGCleanup.cpp - Bookkeeping and code emission for cleanups -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains code dealing with the IR generation for cleanups
// and related information.
//
// A "cleanup" is a piece of code which needs to be executed whenever
// control transfers out of a particular scope.  This can be
// conditionalized to occur only on exceptional control flow, only on
// normal control flow, or both.
//
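// For example, in
//
//   { std::string s; ... }
//
// the destructor of 's' is a cleanup: it must run however control
// leaves the braces, whether by falling off the end, by a goto or
// return out of the scope, or by an exception unwinding through it.
//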
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCleanup.h"

using namespace clang;
using namespace CodeGen;

bool DominatingValue<RValue>::saved_type::needsSaving(RValue rv) {
  if (rv.isScalar())
    return DominatingLLVMValue::needsSaving(rv.getScalarVal());
  if (rv.isAggregate())
    return DominatingLLVMValue::needsSaving(rv.getAggregateAddr());
  return true;
}

DominatingValue<RValue>::saved_type
DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
  if (rv.isScalar()) {
    llvm::Value *V = rv.getScalarVal();

    // These automatically dominate and don't need to be saved.
    if (!DominatingLLVMValue::needsSaving(V))
      return saved_type(V, ScalarLiteral);

    // Everything else needs an alloca.
    llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
    CGF.Builder.CreateStore(V, addr);
    return saved_type(addr, ScalarAddress);
  }

  if (rv.isComplex()) {
    CodeGenFunction::ComplexPairTy V = rv.getComplexVal();
    llvm::Type *ComplexTy =
      llvm::StructType::get(V.first->getType(), V.second->getType(),
                            (void*) 0);
    llvm::Value *addr = CGF.CreateTempAlloca(ComplexTy, "saved-complex");
    CGF.StoreComplexToAddr(V, addr, /*volatile*/ false);
    return saved_type(addr, ComplexAddress);
  }

  assert(rv.isAggregate());
  llvm::Value *V = rv.getAggregateAddr(); // TODO: volatile?
  if (!DominatingLLVMValue::needsSaving(V))
    return saved_type(V, AggregateLiteral);

  llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
  CGF.Builder.CreateStore(V, addr);
  return saved_type(addr, AggregateAddress);
}
/// Given a saved r-value produced by save(), emit the code
/// necessary to restore it to usability at the current insertion
/// point.
RValue DominatingValue<RValue>::saved_type::restore(CodeGenFunction &CGF) {
  switch (K) {
  case ScalarLiteral:
    return RValue::get(Value);
  case ScalarAddress:
    return RValue::get(CGF.Builder.CreateLoad(Value));
  case AggregateLiteral:
    return RValue::getAggregate(Value);
  case AggregateAddress:
    return RValue::getAggregate(CGF.Builder.CreateLoad(Value));
  case ComplexAddress:
    return RValue::getComplex(CGF.LoadComplexFromAddr(Value, false));
  }

  llvm_unreachable("bad saved r-value kind");
}
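
// Schematic use (names are illustrative, not a fixed API contract):
// a value computed before conditionally-emitted code can be captured
// with save() and rematerialized afterwards at a point the original
// definition might not dominate:
//
//   DominatingValue<RValue>::saved_type saved
//     = DominatingValue<RValue>::saved_type::save(CGF, rv);
//   ...                                  // emit conditional code
//   RValue restored = saved.restore(CGF);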

/// Push an entry of the given size onto this protected-scope stack.
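///
/// The scope stack lives in a single heap-allocated buffer that is
/// filled from the end: each entry is carved out by moving StartOfData
/// down toward StartOfBuffer, so the innermost scope always begins at
/// StartOfData, and the buffer is reallocated with doubled capacity
/// when the headroom runs out.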
char *EHScopeStack::allocate(size_t Size) {
  if (!StartOfBuffer) {
    unsigned Capacity = 1024;
    while (Capacity < Size) Capacity *= 2;
    StartOfBuffer = new char[Capacity];
    StartOfData = EndOfBuffer = StartOfBuffer + Capacity;
  } else if (static_cast<size_t>(StartOfData - StartOfBuffer) < Size) {
    unsigned CurrentCapacity = EndOfBuffer - StartOfBuffer;
    unsigned UsedCapacity = CurrentCapacity - (StartOfData - StartOfBuffer);

    unsigned NewCapacity = CurrentCapacity;
    do {
      NewCapacity *= 2;
    } while (NewCapacity < UsedCapacity + Size);

    char *NewStartOfBuffer = new char[NewCapacity];
    char *NewEndOfBuffer = NewStartOfBuffer + NewCapacity;
    char *NewStartOfData = NewEndOfBuffer - UsedCapacity;
    memcpy(NewStartOfData, StartOfData, UsedCapacity);
    delete [] StartOfBuffer;
    StartOfBuffer = NewStartOfBuffer;
    EndOfBuffer = NewEndOfBuffer;
    StartOfData = NewStartOfData;
  }

  assert(StartOfBuffer + Size <= StartOfData);
  StartOfData -= Size;
  return StartOfData;
}

EHScopeStack::stable_iterator
EHScopeStack::getInnermostActiveNormalCleanup() const {
  for (stable_iterator si = getInnermostNormalCleanup(), se = stable_end();
         si != se; ) {
    EHCleanupScope &cleanup = cast<EHCleanupScope>(*find(si));
    if (cleanup.isActive()) return si;
    si = cleanup.getEnclosingNormalCleanup();
  }
  return stable_end();
}

EHScopeStack::stable_iterator EHScopeStack::getInnermostActiveEHScope() const {
  for (stable_iterator si = getInnermostEHScope(), se = stable_end();
         si != se; ) {
    // Skip over inactive cleanups.
    EHCleanupScope *cleanup = dyn_cast<EHCleanupScope>(&*find(si));
    if (cleanup && !cleanup->isActive()) {
      si = cleanup->getEnclosingEHScope();
      continue;
    }

    // All other scopes are always active.
    return si;
  }

  return stable_end();
}

void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) {
  assert(((Size % sizeof(void*)) == 0) && "cleanup type is misaligned");
  char *Buffer = allocate(EHCleanupScope::getSizeForCleanupSize(Size));
  bool IsNormalCleanup = Kind & NormalCleanup;
  bool IsEHCleanup = Kind & EHCleanup;
  bool IsActive = !(Kind & InactiveCleanup);
  EHCleanupScope *Scope =
    new (Buffer) EHCleanupScope(IsNormalCleanup,
                                IsEHCleanup,
                                IsActive,
                                Size,
                                BranchFixups.size(),
                                InnermostNormalCleanup,
                                InnermostEHScope);
  if (IsNormalCleanup)
    InnermostNormalCleanup = stable_begin();
  if (IsEHCleanup)
    InnermostEHScope = stable_begin();

  return Scope->getCleanupBuffer();
}
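
// Note: clients generally don't call this raw allocator directly; the
// templated EHScopeStack::pushCleanup<T>(Kind, ...) helper declared in
// the header obtains the buffer from this method and placement-news
// the cleanup object into it, along the lines of (with a hypothetical
// cleanup class):
//
//   CGF.EHStack.pushCleanup<DestroyTemp>(NormalAndEHCleanup, addr);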

void EHScopeStack::popCleanup() {
  assert(!empty() && "popping exception stack when not empty");

  assert(isa<EHCleanupScope>(*begin()));
  EHCleanupScope &Cleanup = cast<EHCleanupScope>(*begin());
  InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup();
  InnermostEHScope = Cleanup.getEnclosingEHScope();
  StartOfData += Cleanup.getAllocatedSize();

  // Destroy the cleanup.
  Cleanup.~EHCleanupScope();

  // Check whether we can shrink the branch-fixups stack.
  if (!BranchFixups.empty()) {
    // If we no longer have any normal cleanups, all the fixups are
    // complete.
    if (!hasNormalCleanups())
      BranchFixups.clear();

    // Otherwise we can still trim out unnecessary nulls.
    else
      popNullFixups();
  }
}

EHFilterScope *EHScopeStack::pushFilter(unsigned numFilters) {
  assert(getInnermostEHScope() == stable_end());
  char *buffer = allocate(EHFilterScope::getSizeForNumFilters(numFilters));
  EHFilterScope *filter = new (buffer) EHFilterScope(numFilters);
  InnermostEHScope = stable_begin();
  return filter;
}

void EHScopeStack::popFilter() {
  assert(!empty() && "popping exception stack when not empty");

  EHFilterScope &filter = cast<EHFilterScope>(*begin());
  StartOfData += EHFilterScope::getSizeForNumFilters(filter.getNumFilters());

  InnermostEHScope = filter.getEnclosingEHScope();
}

EHCatchScope *EHScopeStack::pushCatch(unsigned numHandlers) {
  char *buffer = allocate(EHCatchScope::getSizeForNumHandlers(numHandlers));
  EHCatchScope *scope =
    new (buffer) EHCatchScope(numHandlers, InnermostEHScope);
  InnermostEHScope = stable_begin();
  return scope;
}

void EHScopeStack::pushTerminate() {
  char *Buffer = allocate(EHTerminateScope::getSize());
  new (Buffer) EHTerminateScope(InnermostEHScope);
  InnermostEHScope = stable_begin();
}

/// Remove any 'null' fixups on the stack.  However, we can't pop more
/// fixups than the fixup depth on the innermost normal cleanup, or
/// else fixups that we try to add to that cleanup will end up in the
/// wrong place.  We *could* try to shrink fixup depths, but that's
/// actually a lot of work for little benefit.
void EHScopeStack::popNullFixups() {
  // We expect this to only be called when there's still an innermost
  // normal cleanup;  otherwise there really shouldn't be any fixups.
  assert(hasNormalCleanups());

  EHScopeStack::iterator it = find(InnermostNormalCleanup);
  unsigned MinSize = cast<EHCleanupScope>(*it).getFixupDepth();
  assert(BranchFixups.size() >= MinSize && "fixup stack out of order");

  while (BranchFixups.size() > MinSize &&
         BranchFixups.back().Destination == 0)
    BranchFixups.pop_back();
}

void CodeGenFunction::initFullExprCleanup() {
  // Create a variable to decide whether the cleanup needs to be run.
  llvm::AllocaInst *active
    = CreateTempAlloca(Builder.getInt1Ty(), "cleanup.cond");

  // Initialize it to false at a site that's guaranteed to be run
  // before each evaluation.
  setBeforeOutermostConditional(Builder.getFalse(), active);

  // Initialize it to true at the current location.
  Builder.CreateStore(Builder.getTrue(), active);

  // Set that as the active flag in the cleanup.
  EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin());
  assert(cleanup.getActiveFlag() == 0 && "cleanup already has active flag?");
  cleanup.setActiveFlag(active);

  if (cleanup.isNormalCleanup()) cleanup.setTestFlagInNormalCleanup();
  if (cleanup.isEHCleanup()) cleanup.setTestFlagInEHCleanup();
}
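
// The flag matters for conditionally-evaluated operands.  In, say,
//
//   cond ? f(MakesTemporary()) : 0
//
// the temporary's cleanup is pushed while emitting only one arm of the
// conditional, so at the point the cleanup finally runs it must test
// the i1 flag to know whether the temporary was ever constructed.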

void EHScopeStack::Cleanup::anchor() {}

/// All the branch fixups on the EH stack have propagated out past the
/// outermost normal cleanup; resolve them all by adding cases to the
/// given switch instruction.
static void ResolveAllBranchFixups(CodeGenFunction &CGF,
                                   llvm::SwitchInst *Switch,
                                   llvm::BasicBlock *CleanupEntry) {
  llvm::SmallPtrSet<llvm::BasicBlock*, 4> CasesAdded;

  for (unsigned I = 0, E = CGF.EHStack.getNumBranchFixups(); I != E; ++I) {
    // Skip this fixup if its destination isn't set.
    BranchFixup &Fixup = CGF.EHStack.getBranchFixup(I);
    if (Fixup.Destination == 0) continue;

    // If there isn't an OptimisticBranchBlock, then InitialBranch is
    // still pointing directly to its destination; forward it to the
    // appropriate cleanup entry.  This is required in the specific
    // case of
    //   { std::string s; goto lbl; }
    //   lbl:
    // i.e. where there's an unresolved fixup inside a single cleanup
    // entry which we're currently popping.
    if (Fixup.OptimisticBranchBlock == 0) {
      new llvm::StoreInst(CGF.Builder.getInt32(Fixup.DestinationIndex),
                          CGF.getNormalCleanupDestSlot(),
                          Fixup.InitialBranch);
      Fixup.InitialBranch->setSuccessor(0, CleanupEntry);
    }

    // Don't add this case to the switch statement twice.
    if (!CasesAdded.insert(Fixup.Destination)) continue;

    Switch->addCase(CGF.Builder.getInt32(Fixup.DestinationIndex),
                    Fixup.Destination);
  }

  CGF.EHStack.clearFixups();
}

/// Transitions the terminator of the given exit-block of a cleanup to
/// be a cleanup switch.
static llvm::SwitchInst *TransitionToCleanupSwitch(CodeGenFunction &CGF,
                                                   llvm::BasicBlock *Block) {
  // If it's a branch, turn it into a switch whose default
  // destination is its original target.
  llvm::TerminatorInst *Term = Block->getTerminator();
  assert(Term && "can't transition block without terminator");

  if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
    assert(Br->isUnconditional());
    llvm::LoadInst *Load =
      new llvm::LoadInst(CGF.getNormalCleanupDestSlot(), "cleanup.dest", Term);
    llvm::SwitchInst *Switch =
      llvm::SwitchInst::Create(Load, Br->getSuccessor(0), 4, Block);
    Br->eraseFromParent();
    return Switch;
  } else {
    return cast<llvm::SwitchInst>(Term);
  }
}

void CodeGenFunction::ResolveBranchFixups(llvm::BasicBlock *Block) {
  assert(Block && "resolving a null target block");
  if (!EHStack.getNumBranchFixups()) return;

  assert(EHStack.hasNormalCleanups() &&
         "branch fixups exist with no normal cleanups on stack");

  llvm::SmallPtrSet<llvm::BasicBlock*, 4> ModifiedOptimisticBlocks;
  bool ResolvedAny = false;

  for (unsigned I = 0, E = EHStack.getNumBranchFixups(); I != E; ++I) {
    // Skip this fixup if its destination doesn't match.
    BranchFixup &Fixup = EHStack.getBranchFixup(I);
    if (Fixup.Destination != Block) continue;

    Fixup.Destination = 0;
    ResolvedAny = true;

    // If it doesn't have an optimistic branch block, InitialBranch is
    // already pointing to the right place.
    llvm::BasicBlock *BranchBB = Fixup.OptimisticBranchBlock;
    if (!BranchBB)
      continue;

    // Don't process the same optimistic branch block twice.
    if (!ModifiedOptimisticBlocks.insert(BranchBB))
      continue;

    llvm::SwitchInst *Switch = TransitionToCleanupSwitch(*this, BranchBB);

    // Add a case to the switch.
    Switch->addCase(Builder.getInt32(Fixup.DestinationIndex), Block);
  }

  if (ResolvedAny)
    EHStack.popNullFixups();
}

/// Pops cleanup blocks until the given savepoint is reached.
void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) {
  assert(Old.isValid());

  while (EHStack.stable_begin() != Old) {
    EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());

    // As long as Old strictly encloses the scope's enclosing normal
    // cleanup, we're going to emit another normal cleanup which
    // fallthrough can propagate through.
    bool FallThroughIsBranchThrough =
      Old.strictlyEncloses(Scope.getEnclosingNormalCleanup());

    PopCleanupBlock(FallThroughIsBranchThrough);
  }
}

static llvm::BasicBlock *CreateNormalEntry(CodeGenFunction &CGF,
                                           EHCleanupScope &Scope) {
  assert(Scope.isNormalCleanup());
  llvm::BasicBlock *Entry = Scope.getNormalBlock();
  if (!Entry) {
    Entry = CGF.createBasicBlock("cleanup");
    Scope.setNormalBlock(Entry);
  }
  return Entry;
}

/// Attempts to reduce a cleanup's entry block to a fallthrough.  This
/// is basically llvm::MergeBlockIntoPredecessor, except
/// simplified/optimized for the tighter constraints on cleanup blocks.
///
/// Returns the new block, whatever it is.
static llvm::BasicBlock *SimplifyCleanupEntry(CodeGenFunction &CGF,
                                              llvm::BasicBlock *Entry) {
  llvm::BasicBlock *Pred = Entry->getSinglePredecessor();
  if (!Pred) return Entry;

  llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator());
  if (!Br || Br->isConditional()) return Entry;
  assert(Br->getSuccessor(0) == Entry);

  // If we were previously inserting at the end of the cleanup entry
  // block, we'll need to continue inserting at the end of the
  // predecessor.
  bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry;
  assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end());

  // Kill the branch.
  Br->eraseFromParent();

  // Replace all uses of the entry with the predecessor, in case there
  // are phis in the cleanup.
  Entry->replaceAllUsesWith(Pred);

  // Merge the blocks.
  Pred->getInstList().splice(Pred->end(), Entry->getInstList());

  // Kill the entry block.
  Entry->eraseFromParent();

  if (WasInsertBlock)
    CGF.Builder.SetInsertPoint(Pred);

  return Pred;
}

static void EmitCleanup(CodeGenFunction &CGF,
                        EHScopeStack::Cleanup *Fn,
                        EHScopeStack::Cleanup::Flags flags,
                        llvm::Value *ActiveFlag) {
  // EH cleanups always occur within a terminate scope.
  if (flags.isForEHCleanup()) CGF.EHStack.pushTerminate();

  // If there's an active flag, load it and skip the cleanup if it's
  // false.
  llvm::BasicBlock *ContBB = 0;
  if (ActiveFlag) {
    ContBB = CGF.createBasicBlock("cleanup.done");
    llvm::BasicBlock *CleanupBB = CGF.createBasicBlock("cleanup.action");
    llvm::Value *IsActive
      = CGF.Builder.CreateLoad(ActiveFlag, "cleanup.is_active");
    CGF.Builder.CreateCondBr(IsActive, CleanupBB, ContBB);
    CGF.EmitBlock(CleanupBB);
  }

  // Ask the cleanup to emit itself.
  Fn->Emit(CGF, flags);
  assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?");

  // Emit the continuation block if there was an active flag.
  if (ActiveFlag)
    CGF.EmitBlock(ContBB);

  // Leave the terminate scope.
  if (flags.isForEHCleanup()) CGF.EHStack.popTerminate();
}
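
// For a flagged cleanup, the preceding function emits IR shaped
// roughly like this (block and value names as created above):
//
//   %cleanup.is_active = load i1* %cleanup.isactive
//   br i1 %cleanup.is_active, label %cleanup.action, label %cleanup.done
// cleanup.action:
//   ...                                  ; the cleanup body itself
//   br label %cleanup.done
// cleanup.done: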

static void ForwardPrebranchedFallthrough(llvm::BasicBlock *Exit,
                                          llvm::BasicBlock *From,
                                          llvm::BasicBlock *To) {
  // Exit is the exit block of a cleanup, so it always terminates in
  // an unconditional branch or a switch.
  llvm::TerminatorInst *Term = Exit->getTerminator();

  if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
    assert(Br->isUnconditional() && Br->getSuccessor(0) == From);
    Br->setSuccessor(0, To);
  } else {
    llvm::SwitchInst *Switch = cast<llvm::SwitchInst>(Term);
    for (unsigned I = 0, E = Switch->getNumSuccessors(); I != E; ++I)
      if (Switch->getSuccessor(I) == From)
        Switch->setSuccessor(I, To);
  }
}
/// We don't need a normal entry block for the given cleanup.
/// Optimistic fixup branches can cause such a block to come into
/// existence anyway; if so, destroy it.
///
/// The validity of this transformation is very much specific to the
/// exact ways in which we form branches to cleanup entries.
static void destroyOptimisticNormalEntry(CodeGenFunction &CGF,
                                         EHCleanupScope &scope) {
  llvm::BasicBlock *entry = scope.getNormalBlock();
  if (!entry) return;

  // Replace all the uses with unreachable.
  llvm::BasicBlock *unreachableBB = CGF.getUnreachableBlock();
  for (llvm::BasicBlock::use_iterator
         i = entry->use_begin(), e = entry->use_end(); i != e; ) {
    llvm::Use &use = i.getUse();
    ++i;

    use.set(unreachableBB);

    // The only uses should be fixup switches.
    llvm::SwitchInst *si = cast<llvm::SwitchInst>(use.getUser());
    if (si->getNumCases() == 1 && si->getDefaultDest() == unreachableBB) {
      // Replace the switch with a branch.
      llvm::BranchInst::Create(si->case_begin().getCaseSuccessor(), si);

      // The switch operand is a load from the cleanup-dest alloca.
      llvm::LoadInst *condition = cast<llvm::LoadInst>(si->getCondition());

      // Destroy the switch.
      si->eraseFromParent();

      // Destroy the load.
      assert(condition->getOperand(0) == CGF.NormalCleanupDest);
      assert(condition->use_empty());
      condition->eraseFromParent();
    }
  }

  assert(entry->use_empty());
  delete entry;
}

/// Pops a cleanup block.  If the block includes a normal cleanup, the
/// current insertion point is threaded through the cleanup, as are
/// any branch fixups on the cleanup.
void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
  assert(!EHStack.empty() && "cleanup stack is empty!");
  assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
  assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());

  // Remember activation information.
  bool IsActive = Scope.isActive();
  llvm::Value *NormalActiveFlag =
    Scope.shouldTestFlagInNormalCleanup() ? Scope.getActiveFlag() : 0;
  llvm::Value *EHActiveFlag =
    Scope.shouldTestFlagInEHCleanup() ? Scope.getActiveFlag() : 0;

  // Check whether we need an EH cleanup.  This is only true if we've
  // generated a lazy EH cleanup block.
  llvm::BasicBlock *EHEntry = Scope.getCachedEHDispatchBlock();
  assert(Scope.hasEHBranches() == (EHEntry != 0));
  bool RequiresEHCleanup = (EHEntry != 0);
  EHScopeStack::stable_iterator EHParent = Scope.getEnclosingEHScope();

  // Check the three conditions which might require a normal cleanup:

  // - whether there are branch fix-ups through this cleanup
  unsigned FixupDepth = Scope.getFixupDepth();
  bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth;

  // - whether there are branch-throughs or branch-afters
  bool HasExistingBranches = Scope.hasBranches();

  // - whether there's a fallthrough
  llvm::BasicBlock *FallthroughSource = Builder.GetInsertBlock();
  bool HasFallthrough = (FallthroughSource != 0 && IsActive);

  // Branch-through fall-throughs leave the insertion point set to the
  // end of the last cleanup, which points to the current scope.  The
  // rest of IR gen doesn't need to worry about this; it only happens
  // during the execution of PopCleanupBlocks().
  bool HasPrebranchedFallthrough =
    (FallthroughSource && FallthroughSource->getTerminator());

  // If this is a normal cleanup, then having a prebranched
  // fallthrough implies that the fallthrough source unconditionally
  // jumps here.
  assert(!Scope.isNormalCleanup() || !HasPrebranchedFallthrough ||
         (Scope.getNormalBlock() &&
          FallthroughSource->getTerminator()->getSuccessor(0)
            == Scope.getNormalBlock()));

  bool RequiresNormalCleanup = false;
  if (Scope.isNormalCleanup() &&
      (HasFixups || HasExistingBranches || HasFallthrough)) {
    RequiresNormalCleanup = true;
  }

  // If we have a prebranched fallthrough into an inactive normal
  // cleanup, rewrite it so that it leads to the appropriate place.
  if (Scope.isNormalCleanup() && HasPrebranchedFallthrough && !IsActive) {
    llvm::BasicBlock *prebranchDest;

    // If the prebranch is semantically branching through the next
    // cleanup, just forward it to the next block, leaving the
    // insertion point in the prebranched block.
    if (FallthroughIsBranchThrough) {
      EHScope &enclosing = *EHStack.find(Scope.getEnclosingNormalCleanup());
      prebranchDest = CreateNormalEntry(*this, cast<EHCleanupScope>(enclosing));

    // Otherwise, we need to make a new block.  If the normal cleanup
    // isn't being used at all, we could actually reuse the normal
    // entry block, but this is simpler, and it avoids conflicts with
    // dead optimistic fixup branches.
    } else {
      prebranchDest = createBasicBlock("forwarded-prebranch");
      EmitBlock(prebranchDest);
    }

    llvm::BasicBlock *normalEntry = Scope.getNormalBlock();
    assert(normalEntry && !normalEntry->use_empty());

    ForwardPrebranchedFallthrough(FallthroughSource,
                                  normalEntry, prebranchDest);
  }

  // If we don't need the cleanup at all, we're done.
  if (!RequiresNormalCleanup && !RequiresEHCleanup) {
    destroyOptimisticNormalEntry(*this, Scope);
    EHStack.popCleanup(); // safe because there are no fixups
    assert(EHStack.getNumBranchFixups() == 0 ||
           EHStack.hasNormalCleanups());
    return;
  }

  // Copy the cleanup emission data out.  Note that SmallVector
  // guarantees maximal alignment for its buffer regardless of its
  // type parameter.
  SmallVector<char, 8*sizeof(void*)> CleanupBuffer;
  CleanupBuffer.reserve(Scope.getCleanupSize());
  memcpy(CleanupBuffer.data(),
         Scope.getCleanupBuffer(), Scope.getCleanupSize());
  CleanupBuffer.set_size(Scope.getCleanupSize());
  EHScopeStack::Cleanup *Fn =
    reinterpret_cast<EHScopeStack::Cleanup*>(CleanupBuffer.data());

  EHScopeStack::Cleanup::Flags cleanupFlags;
  if (Scope.isNormalCleanup())
    cleanupFlags.setIsNormalCleanupKind();
  if (Scope.isEHCleanup())
    cleanupFlags.setIsEHCleanupKind();

  if (!RequiresNormalCleanup) {
    destroyOptimisticNormalEntry(*this, Scope);
    EHStack.popCleanup();
  } else {
    // If we have a fallthrough and no other need for the cleanup,
    // emit it directly.
    if (HasFallthrough && !HasPrebranchedFallthrough &&
        !HasFixups && !HasExistingBranches) {

      destroyOptimisticNormalEntry(*this, Scope);
      EHStack.popCleanup();

      EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);

    // Otherwise, the best approach is to thread everything through
    // the cleanup block and then try to clean up after ourselves.
    } else {
      // Force the entry block to exist.
      llvm::BasicBlock *NormalEntry = CreateNormalEntry(*this, Scope);

      // I.  Set up the fallthrough edge in.

      CGBuilderTy::InsertPoint savedInactiveFallthroughIP;

      // If there's a fallthrough, we need to store the cleanup
      // destination index.  For fall-throughs this is always zero.
      if (HasFallthrough) {
        if (!HasPrebranchedFallthrough)
          Builder.CreateStore(Builder.getInt32(0), getNormalCleanupDestSlot());

      // Otherwise, save and clear the IP if we don't have fallthrough
      // because the cleanup is inactive.
      } else if (FallthroughSource) {
        assert(!IsActive && "source without fallthrough for active cleanup");
        savedInactiveFallthroughIP = Builder.saveAndClearIP();
      }

      // II.  Emit the entry block.  This implicitly branches to it if
      // we have fallthrough.  All the fixups and existing branches
      // should already be branched to it.
      EmitBlock(NormalEntry);

      // III.  Figure out where we're going and build the cleanup
      // epilogue.

      bool HasEnclosingCleanups =
        (Scope.getEnclosingNormalCleanup() != EHStack.stable_end());

      // Compute the branch-through dest if we need it:
      //   - if there are branch-throughs threaded through the scope
      //   - if fall-through is a branch-through
      //   - if there are fixups that will be optimistically forwarded
      //     to the enclosing cleanup
      llvm::BasicBlock *BranchThroughDest = 0;
      if (Scope.hasBranchThroughs() ||
          (FallthroughSource && FallthroughIsBranchThrough) ||
          (HasFixups && HasEnclosingCleanups)) {
        assert(HasEnclosingCleanups);
        EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup());
        BranchThroughDest = CreateNormalEntry(*this, cast<EHCleanupScope>(S));
      }

      llvm::BasicBlock *FallthroughDest = 0;
      SmallVector<llvm::Instruction*, 2> InstsToAppend;

      // If there's exactly one branch-after and no other threads,
      // we can route it without a switch.
      if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough &&
          Scope.getNumBranchAfters() == 1) {
        assert(!BranchThroughDest || !IsActive);

        // TODO: clean up the possibly dead stores to the cleanup dest slot.
        llvm::BasicBlock *BranchAfter = Scope.getBranchAfterBlock(0);
        InstsToAppend.push_back(llvm::BranchInst::Create(BranchAfter));

      // Build a switch-out if we need it:
      //   - if there are branch-afters threaded through the scope
      //   - if fall-through is a branch-after
      //   - if there are fixups that have nowhere left to go and
      //     so must be immediately resolved
      } else if (Scope.getNumBranchAfters() ||
                 (HasFallthrough && !FallthroughIsBranchThrough) ||
                 (HasFixups && !HasEnclosingCleanups)) {

        llvm::BasicBlock *Default =
          (BranchThroughDest ? BranchThroughDest : getUnreachableBlock());

        // TODO: base this on the number of branch-afters and fixups
        const unsigned SwitchCapacity = 10;

        llvm::LoadInst *Load =
          new llvm::LoadInst(getNormalCleanupDestSlot(), "cleanup.dest");
        llvm::SwitchInst *Switch =
          llvm::SwitchInst::Create(Load, Default, SwitchCapacity);

        InstsToAppend.push_back(Load);
        InstsToAppend.push_back(Switch);

        // Branch-after fallthrough.
        if (FallthroughSource && !FallthroughIsBranchThrough) {
          FallthroughDest = createBasicBlock("cleanup.cont");
          if (HasFallthrough)
            Switch->addCase(Builder.getInt32(0), FallthroughDest);
        }

        for (unsigned I = 0, E = Scope.getNumBranchAfters(); I != E; ++I) {
          Switch->addCase(Scope.getBranchAfterIndex(I),
                          Scope.getBranchAfterBlock(I));
        }

        // If there aren't any enclosing cleanups, we can resolve all
        // the fixups now.
        if (HasFixups && !HasEnclosingCleanups)
          ResolveAllBranchFixups(*this, Switch, NormalEntry);
      } else {
        // We should always have a branch-through destination in this case.
        assert(BranchThroughDest);
        InstsToAppend.push_back(llvm::BranchInst::Create(BranchThroughDest));
      }

      // IV.  Pop the cleanup and emit it.
      EHStack.popCleanup();
      assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups);

      EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);

      // Append the prepared cleanup epilogue from above.
      llvm::BasicBlock *NormalExit = Builder.GetInsertBlock();
      for (unsigned I = 0, E = InstsToAppend.size(); I != E; ++I)
        NormalExit->getInstList().push_back(InstsToAppend[I]);

      // Optimistically hope that any fixups will continue falling through.
      for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
           I < E; ++I) {
        BranchFixup &Fixup = EHStack.getBranchFixup(I);
        if (!Fixup.Destination) continue;
        if (!Fixup.OptimisticBranchBlock) {
          new llvm::StoreInst(Builder.getInt32(Fixup.DestinationIndex),
                              getNormalCleanupDestSlot(),
                              Fixup.InitialBranch);
          Fixup.InitialBranch->setSuccessor(0, NormalEntry);
        }
        Fixup.OptimisticBranchBlock = NormalExit;
      }

      // V.  Set up the fallthrough edge out.

      // Case 1: a fallthrough source exists but doesn't branch to the
      // cleanup because the cleanup is inactive.
      if (!HasFallthrough && FallthroughSource) {
        // Prebranched fallthrough was forwarded earlier.
        // Non-prebranched fallthrough doesn't need to be forwarded.
        // Either way, all we need to do is restore the IP we cleared before.
        assert(!IsActive);
        Builder.restoreIP(savedInactiveFallthroughIP);

      // Case 2: a fallthrough source exists and should branch to the
      // cleanup, but we're not supposed to branch through to the next
      // cleanup.
      } else if (HasFallthrough && FallthroughDest) {
        assert(!FallthroughIsBranchThrough);
        EmitBlock(FallthroughDest);

      // Case 3: a fallthrough source exists and should branch to the
      // cleanup and then through to the next.
      } else if (HasFallthrough) {
        // Everything is already set up for this.

      // Case 4: no fallthrough source exists.
      } else {
        Builder.ClearInsertionPoint();
      }

      // VI.  Assorted cleaning.

      // Check whether we can merge NormalEntry into a single predecessor.
      // This might invalidate (non-IR) pointers to NormalEntry.
      llvm::BasicBlock *NewNormalEntry =
        SimplifyCleanupEntry(*this, NormalEntry);

      // If it did invalidate those pointers, and NormalEntry was the same
      // as NormalExit, go back and patch up the fixups.
      if (NewNormalEntry != NormalEntry && NormalEntry == NormalExit)
        for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
               I < E; ++I)
          EHStack.getBranchFixup(I).OptimisticBranchBlock = NewNormalEntry;
    }
  }

  assert(EHStack.hasNormalCleanups() || EHStack.getNumBranchFixups() == 0);

  // Emit the EH cleanup if required.
  if (RequiresEHCleanup) {
    CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();

    EmitBlock(EHEntry);

    // We only actually emit the cleanup code if the cleanup is either
    // active or was used before it was deactivated.
    if (EHActiveFlag || IsActive) {
      cleanupFlags.setIsForEHCleanup();
      EmitCleanup(*this, Fn, cleanupFlags, EHActiveFlag);
    }

    Builder.CreateBr(getEHDispatchBlock(EHParent));

    Builder.restoreIP(SavedIP);

    SimplifyCleanupEntry(*this, EHEntry);
  }
}
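
// When control has to be threaded through the cleanup, the cleanup's
// exit ends in a switch on the destination index stored in the
// cleanup dest slot, schematically:
//
//   %cleanup.dest = load i32* %cleanup.dest.slot
//   switch i32 %cleanup.dest, label %enclosing.cleanup [
//     i32 0, label %cleanup.cont          ; fallthrough
//     i32 N, label %some.branch.after     ; a branch-after destination
//   ]
//
// with the default going to the enclosing cleanup's entry (the
// branch-through) or to an unreachable block if there is none.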

/// isObviouslyBranchWithoutCleanups - Return true if a branch to the
/// specified destination obviously has no cleanups to run.  'false' is always
/// a conservatively correct answer for this method.
bool CodeGenFunction::isObviouslyBranchWithoutCleanups(JumpDest Dest) const {
  assert(Dest.getScopeDepth().encloses(EHStack.stable_begin())
         && "stale jump destination");

  // Calculate the innermost active normal cleanup.
  EHScopeStack::stable_iterator TopCleanup =
    EHStack.getInnermostActiveNormalCleanup();

  // If we're not in an active normal cleanup scope, or if the
  // destination scope is within the innermost active normal cleanup
  // scope, we don't need to worry about fixups.
  if (TopCleanup == EHStack.stable_end() ||
      TopCleanup.encloses(Dest.getScopeDepth())) // works for invalid
    return true;

  // Otherwise, we might need some cleanups.
  return false;
}

/// Terminate the current block by emitting a branch which might leave
/// the current cleanup-protected scope.  The target scope may not yet
/// be known, in which case this will require a fixup.
///
/// As a side-effect, this method clears the insertion point.
void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
  assert(Dest.getScopeDepth().encloses(EHStack.stable_begin())
         && "stale jump destination");

  if (!HaveInsertPoint())
    return;

  // Create the branch.
  llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());

  // Calculate the innermost active normal cleanup.
  EHScopeStack::stable_iterator
    TopCleanup = EHStack.getInnermostActiveNormalCleanup();

  // If we're not in an active normal cleanup scope, or if the
  // destination scope is within the innermost active normal cleanup
  // scope, we don't need to worry about fixups.
  if (TopCleanup == EHStack.stable_end() ||
      TopCleanup.encloses(Dest.getScopeDepth())) { // works for invalid
    Builder.ClearInsertionPoint();
    return;
  }

  // If we can't resolve the destination cleanup scope, just add this
  // to the current cleanup scope as a branch fixup.
  if (!Dest.getScopeDepth().isValid()) {
    BranchFixup &Fixup = EHStack.addBranchFixup();
    Fixup.Destination = Dest.getBlock();
    Fixup.DestinationIndex = Dest.getDestIndex();
    Fixup.InitialBranch = BI;
    Fixup.OptimisticBranchBlock = 0;

    Builder.ClearInsertionPoint();
    return;
  }

  // Otherwise, thread through all the normal cleanups in scope.

  // Store the index at the start.
  llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
  new llvm::StoreInst(Index, getNormalCleanupDestSlot(), BI);

  // Adjust BI to point to the first cleanup block.
  {
    EHCleanupScope &Scope =
      cast<EHCleanupScope>(*EHStack.find(TopCleanup));
    BI->setSuccessor(0, CreateNormalEntry(*this, Scope));
  }

  // Add this destination to all the scopes involved.
  EHScopeStack::stable_iterator I = TopCleanup;
  EHScopeStack::stable_iterator E = Dest.getScopeDepth();
  if (E.strictlyEncloses(I)) {
    while (true) {
      EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
      assert(Scope.isNormalCleanup());
      I = Scope.getEnclosingNormalCleanup();

      // If this is the last cleanup we're propagating through, tell it
      // that there's a resolved jump moving through it.
      if (!E.strictlyEncloses(I)) {
        Scope.addBranchAfter(Index, Dest.getBlock());
        break;
      }

      // Otherwise, tell the scope that there's a jump propagating
      // through it.  If this isn't new information, all the rest of
      // the work has been done before.
      if (!Scope.addBranchThrough(Dest.getBlock()))
        break;
    }
  }

  Builder.ClearInsertionPoint();
}
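
// For example, given
//
//   { std::string a; { std::string b; goto done; } }
//   done:
//
// the goto stores done's destination index into the cleanup dest slot
// and branches into b's cleanup entry; b's scope records the jump as a
// branch-through, while a's scope, the last cleanup before the
// destination, records 'done' as a branch-after.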

static bool IsUsedAsNormalCleanup(EHScopeStack &EHStack,
                                  EHScopeStack::stable_iterator C) {
  // If we needed a normal block for any reason, that counts.
  if (cast<EHCleanupScope>(*EHStack.find(C)).getNormalBlock())
    return true;

  // Check whether any enclosed cleanups were needed.
  for (EHScopeStack::stable_iterator
         I = EHStack.getInnermostNormalCleanup();
         I != C; ) {
    assert(C.strictlyEncloses(I));
    EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
    if (S.getNormalBlock()) return true;
    I = S.getEnclosingNormalCleanup();
  }

  return false;
}

static bool IsUsedAsEHCleanup(EHScopeStack &EHStack,
                              EHScopeStack::stable_iterator cleanup) {
  // If we needed an EH block for any reason, that counts.
  if (EHStack.find(cleanup)->hasEHBranches())
    return true;

  // Check whether any enclosed cleanups were needed.
  for (EHScopeStack::stable_iterator
         i = EHStack.getInnermostEHScope(); i != cleanup; ) {
    assert(cleanup.strictlyEncloses(i));

    EHScope &scope = *EHStack.find(i);
    if (scope.hasEHBranches())
      return true;

    i = scope.getEnclosingEHScope();
  }

  return false;
}

enum ForActivation_t {
  ForActivation,
  ForDeactivation
};

/// The given cleanup block is changing activation state.  Configure a
/// cleanup variable if necessary.
///
/// It would be good if we had some way of determining if there were
/// extra uses *after* the change-over point.
static void SetupCleanupBlockActivation(CodeGenFunction &CGF,
                                        EHScopeStack::stable_iterator C,
                                        ForActivation_t kind,
                                        llvm::Instruction *dominatingIP) {
  EHCleanupScope &Scope = cast<EHCleanupScope>(*CGF.EHStack.find(C));

  // We always need the flag if we're activating the cleanup in a
  // conditional context, because we have to assume that the current
  // location doesn't necessarily dominate the cleanup's code.
  bool isActivatedInConditional =
    (kind == ForActivation && CGF.isInConditionalBranch());

  bool needFlag = false;

  // Calculate whether the cleanup was used:

  //   - as a normal cleanup
  if (Scope.isNormalCleanup() &&
      (isActivatedInConditional || IsUsedAsNormalCleanup(CGF.EHStack, C))) {
    Scope.setTestFlagInNormalCleanup();
    needFlag = true;
  }

  //  - as an EH cleanup
  if (Scope.isEHCleanup() &&
      (isActivatedInConditional || IsUsedAsEHCleanup(CGF.EHStack, C))) {
    Scope.setTestFlagInEHCleanup();
    needFlag = true;
  }

  // If it hasn't yet been used as either, we're done.
  if (!needFlag) return;

  llvm::AllocaInst *var = Scope.getActiveFlag();
  if (!var) {
    var = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), "cleanup.isactive");
    Scope.setActiveFlag(var);

    assert(dominatingIP && "no existing variable and no dominating IP!");

    // Initialize to true or false depending on whether it was
    // active up to this point.
    llvm::Value *value = CGF.Builder.getInt1(kind == ForDeactivation);

    // If we're in a conditional block, ignore the dominating IP and
    // use the outermost conditional branch.
    if (CGF.isInConditionalBranch()) {
      CGF.setBeforeOutermostConditional(value, var);
    } else {
      new llvm::StoreInst(value, var, dominatingIP);
    }
  }

  CGF.Builder.CreateStore(CGF.Builder.getInt1(kind == ForActivation), var);
}

/// Activate a cleanup that was created in an inactive state.
void CodeGenFunction::ActivateCleanupBlock(EHScopeStack::stable_iterator C,
                                           llvm::Instruction *dominatingIP) {
  assert(C != EHStack.stable_end() && "activating bottom of stack?");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
  assert(!Scope.isActive() && "double activation");

  SetupCleanupBlockActivation(*this, C, ForActivation, dominatingIP);

  Scope.setActive(true);
}

/// Deactivate a cleanup that was created in an active state.
void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C,
                                             llvm::Instruction *dominatingIP) {
  assert(C != EHStack.stable_end() && "deactivating bottom of stack?");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
  assert(Scope.isActive() && "double deactivation");

  // If it's the top of the stack, just pop it.
  if (C == EHStack.stable_begin()) {
    // If it's a normal cleanup, we need to pretend that the
    // fallthrough is unreachable.
    CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
    PopCleanupBlock();
    Builder.restoreIP(SavedIP);
    return;
  }

  // Otherwise, follow the general case.
  SetupCleanupBlockActivation(*this, C, ForDeactivation, dominatingIP);

  Scope.setActive(false);
}

llvm::Value *CodeGenFunction::getNormalCleanupDestSlot() {
  if (!NormalCleanupDest)
    NormalCleanupDest =
      CreateTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot");
  return NormalCleanupDest;
}

/// Emits all the code to cause the given temporary to be cleaned up.
void CodeGenFunction::EmitCXXTemporary(const CXXTemporary *Temporary,
                                       QualType TempType,
                                       llvm::Value *Ptr) {
  pushDestroy(NormalAndEHCleanup, Ptr, TempType, destroyCXXObject,
              /*useEHCleanup*/ true);
}