CodeGenFunction.cpp revision fa1f756f88f4e6f3da2673082fdc1e8f54bccd6f
//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGDebugInfo.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "llvm/Support/CFG.h"
using namespace clang;
using namespace CodeGen;

CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
  : CGM(cgm), Target(CGM.getContext().Target), SwitchInsn(NULL),
    CaseRangeBlock(NULL) {
    LLVMIntTy = ConvertType(getContext().IntTy);
    LLVMPointerWidth = Target.getPointerWidth(0);
}

ASTContext &CodeGenFunction::getContext() const {
  return CGM.getContext();
}


llvm::BasicBlock *CodeGenFunction::getBasicBlockForLabel(const LabelStmt *S) {
  llvm::BasicBlock *&BB = LabelMap[S];
  if (BB) return BB;

  // Create, but don't insert, the new block.
  return BB = createBasicBlock(S->getName());
}

llvm::Constant *
CodeGenFunction::GetAddrOfStaticLocalVar(const VarDecl *BVD) {
  return cast<llvm::Constant>(LocalDeclMap[BVD]);
}

llvm::Value *CodeGenFunction::GetAddrOfLocalVar(const VarDecl *VD)
{
  return LocalDeclMap[VD];
}

const llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

const llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

bool CodeGenFunction::isObjCPointerType(QualType T) {
  // All Objective-C types are pointers.
  return T->isObjCInterfaceType() ||
    T->isObjCQualifiedInterfaceType() || T->isObjCQualifiedIdType();
}

bool CodeGenFunction::hasAggregateLLVMType(QualType T) {
  // FIXME: Use positive checks instead of negative ones to be more
  // robust in the face of extension.
  return !isObjCPointerType(T) && !T->isRealType() && !T->isPointerLikeType() &&
    !T->isVoidType() && !T->isVectorType() && !T->isFunctionType() &&
    !T->isBlockPointerType();
}

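/// EmitReturnBlock - Emit the unified return block, trying to avoid emitting
/// it when it is not needed.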
void CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if there are no explicit
    // jumps to the return block.
    if (ReturnBlock->use_empty())
      delete ReturnBlock;
    else
      EmitBlock(ReturnBlock);
    return;
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock->hasOneUse()) {
    llvm::BranchInst *BI =
      dyn_cast<llvm::BranchInst>(*ReturnBlock->use_begin());
    if (BI && BI->isUnconditional() && BI->getSuccessor(0) == ReturnBlock) {
      // Reset insertion point and delete the branch.
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock;
      return;
    }
  }

  // FIXME: We are at an unreachable point, there is no reason to emit
  // the block unless it has uses. However, we still need a place to
  // put the debug region.end for now.

  EmitBlock(ReturnBlock);
}

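/// FinishFunction - Complete IR generation for the current function body:
/// emit the unified return block and the function epilog, close the debug
/// region, and tear down transient per-function state such as the alloca
/// insertion point.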
void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  // Finish emission of indirect switches.
  EmitIndirectSwitches();

  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");
  assert(BlockScopes.empty() &&
         "did not remove all blocks from block scope map!");
  assert(CleanupEntries.empty() &&
         "mismatched push/pop in cleanup stack!");

  // Emit function epilog (to return).
  EmitReturnBlock();

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = CGM.getDebugInfo()) {
    DI->setLocation(EndLoc);
    DI->EmitRegionEnd(CurFn, Builder);
  }

  EmitFunctionEpilog(*CurFnInfo, ReturnValue);

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  AllocaInsertPt->eraseFromParent();
  AllocaInsertPt = 0;
}

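/// StartFunction - Set up the per-function state and emit the function
/// prolog: the entry block, the alloca insertion point, the return block and
/// return value slot, debug information for the function start, and the size
/// expressions of any variably modified argument types.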
void CodeGenFunction::StartFunction(const Decl *D, QualType RetTy,
                                    llvm::Function *Fn,
                                    const FunctionArgList &Args,
                                    SourceLocation StartLoc) {
  DidCallStackSave = false;
  CurFuncDecl = D;
  FnRetTy = RetTy;
  CurFn = Fn;
  assert(CurFn->isDeclaration() && "Function already has body?");

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entryblock
  // later.  Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(llvm::Type::Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, llvm::Type::Int32Ty, "allocapt",
                                         EntryBB);

  ReturnBlock = createBasicBlock("return");
  ReturnValue = 0;
  if (!RetTy->isVoidType())
    ReturnValue = CreateTempAlloca(ConvertType(RetTy), "retval");

  Builder.SetInsertPoint(EntryBB);

  // Emit subprogram debug descriptor.
  // FIXME: The cast here is a huge hack.
  if (CGDebugInfo *DI = CGM.getDebugInfo()) {
    DI->setLocation(StartLoc);
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
      DI->EmitFunctionStart(FD->getIdentifier()->getName(),
                            RetTy, CurFn, Builder);
    } else {
      // Just use LLVM function name.
      DI->EmitFunctionStart(Fn->getName().c_str(),
                            RetTy, CurFn, Builder);
    }
  }

  // FIXME: Leaked.
  CurFnInfo = &CGM.getTypes().getFunctionInfo(FnRetTy, Args);
  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    QualType Ty = i->second;

    if (Ty->isVariablyModifiedType())
      EmitVLASize(Ty);
  }
}

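/// GenerateCode - Emit the LLVM IR for the body of the given function
/// declaration into Fn.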
void CodeGenFunction::GenerateCode(const FunctionDecl *FD,
                                   llvm::Function *Fn) {
  FunctionArgList Args;
  if (FD->getNumParams()) {
    const FunctionTypeProto* FProto = FD->getType()->getAsFunctionTypeProto();
    assert(FProto && "Function def must have prototype!");

    for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
      Args.push_back(std::make_pair(FD->getParamDecl(i),
                                    FProto->getArgType(i)));
  }

  StartFunction(FD, FD->getResultType(), Fn, Args,
                cast<CompoundStmt>(FD->getBody())->getLBracLoc());

  EmitStmt(FD->getBody());

  const CompoundStmt *S = dyn_cast<CompoundStmt>(FD->getBody());
  if (S) {
    FinishFunction(S->getRBracLoc());
  } else {
    FinishFunction();
  }
}

/// ContainsLabel - Return true if the statement contains a label in it.  If
/// this statement is not executed normally, then not containing a label means
/// that we can just remove the code.
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
  // Null statement, not a label!
  if (S == 0) return false;

  // If this is a label, we have to emit the code, consider something like:
  // if (0) {  ...  foo:  bar(); }  goto foo;
  if (isa<LabelStmt>(S))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we have
  // to emit the code.
  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore cases below it.
  if (isa<SwitchStmt>(S))
    IgnoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
       I != E; ++I)
    if (ContainsLabel(*I, IgnoreCaseStmts))
      return true;

  return false;
}


/// ConstantFoldsToSimpleInteger - If the specified expression does not fold to
/// a constant, or if it does but contains a label, return 0.  If it constant
/// folds to 'true' and does not contain a label, return 1; if it constant
/// folds to 'false' and does not contain a label, return -1.
int CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  Expr::EvalResult Result;
  if (!Cond->Evaluate(Result, getContext()) || !Result.Val.isInt() ||
      Result.HasSideEffects)
    return 0;  // Not foldable, not an integer, or not fully evaluatable.

  if (CodeGenFunction::ContainsLabel(Cond))
    return 0;  // Contains a label.

  return Result.Val.getInt().getBoolValue() ? 1 : -1;
}


/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
/// statement) to the specified blocks.  Based on the condition, this might try
/// to simplify the codegen of the conditional based on the branch.
///
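/// For example, given 'if (X && Y)', the code for 'Y' is emitted in a block
/// that is only reached when 'X' evaluates to true, and trivially constant
/// operands such as the '1' in '1 && X' are folded away entirely.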
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
                                           llvm::BasicBlock *TrueBlock,
                                           llvm::BasicBlock *FalseBlock) {
  if (const ParenExpr *PE = dyn_cast<ParenExpr>(Cond))
    return EmitBranchOnBoolExpr(PE->getSubExpr(), TrueBlock, FalseBlock);

  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
    // Handle X && Y in a condition.
    if (CondBOp->getOpcode() == BinaryOperator::LAnd) {
      // If we have "1 && X", simplify the code.  "0 && X" would have constant
      // folded if the case was simple enough.
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS()) == 1) {
        // br(1 && X) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      }

      // If we have "X && 1", simplify the code to use an uncond branch.
      // "X && 0" would have been constant folded to 0.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS()) == 1) {
        // br(X && 1) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is false, we
      // want to jump to the FalseBlock.
      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
      EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock);
      EmitBlock(LHSTrue);

      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      return;
    } else if (CondBOp->getOpcode() == BinaryOperator::LOr) {
      // If we have "0 || X", simplify the code.  "1 || X" would have constant
      // folded if the case was simple enough.
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS()) == -1) {
        // br(0 || X) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      }

      // If we have "X || 0", simplify the code to use an uncond branch.
      // "X || 1" would have been constant folded to 1.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS()) == -1) {
        // br(X || 0) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is true, we
      // want to jump to the TrueBlock.
      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
      EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse);
      EmitBlock(LHSFalse);

      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      return;
    }
  }

  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
    // br(!x, t, f) -> br(x, f, t)
    if (CondUOp->getOpcode() == UnaryOperator::LNot)
      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock);
  }

  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
    // Handle ?: operator.

    // Just ignore GNU ?: extension.
    if (CondOp->getLHS()) {
      // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
      llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
      llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
      EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock);
      EmitBlock(LHSBlock);
      EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock);
      EmitBlock(RHSBlock);
      EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock);
      return;
    }
  }

  // Emit the code with the fully general case.
  llvm::Value *CondV = EvaluateExprAsBool(Cond);
  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock);
}

/// getCGRecordLayout - Return record layout info.
const CGRecordLayout *CodeGenFunction::getCGRecordLayout(CodeGenTypes &CGT,
                                                         QualType Ty) {
  const RecordType *RTy = Ty->getAsRecordType();
  assert (RTy && "Unexpected type. RecordType expected here.");

  return CGT.getCGRecordLayout(RTy->getDecl());
}

/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type,
                                       bool OmitOnError) {
  CGM.ErrorUnsupported(S, Type, OmitOnError);
}

unsigned CodeGenFunction::GetIDForAddrOfLabel(const LabelStmt *L) {
  // Use LabelIDs.size() as the new ID if one hasn't been assigned.
  return LabelIDs.insert(std::make_pair(L, LabelIDs.size())).first->second;
}

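/// EmitMemSetToZero - Zero-initialize an object of the given type at DestPtr
/// by emitting a call to the memset intrinsic.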
void CodeGenFunction::EmitMemSetToZero(llvm::Value *DestPtr, QualType Ty)
{
  const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
  if (DestPtr->getType() != BP)
    DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");

  // Get size and alignment info for this aggregate.
  std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);

  // FIXME: Handle variable sized types.
  const llvm::Type *IntPtr = llvm::IntegerType::get(LLVMPointerWidth);

  Builder.CreateCall4(CGM.getMemSetFn(), DestPtr,
                      llvm::ConstantInt::getNullValue(llvm::Type::Int8Ty),
                      // TypeInfo.first describes size in bits.
                      llvm::ConstantInt::get(IntPtr, TypeInfo.first/8),
                      llvm::ConstantInt::get(llvm::Type::Int32Ty,
                                             TypeInfo.second/8));
}

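/// EmitIndirectSwitches - Now that the full set of address-taken labels is
/// known, fill in the default destination and the per-label cases of the
/// switch instructions used to lower indirect gotos.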
void CodeGenFunction::EmitIndirectSwitches() {
  llvm::BasicBlock *Default;

  if (IndirectSwitches.empty())
    return;

  if (!LabelIDs.empty()) {
    Default = getBasicBlockForLabel(LabelIDs.begin()->first);
  } else {
    // No possible targets for indirect goto, just emit an infinite
    // loop.
    Default = createBasicBlock("indirectgoto.loop", CurFn);
    llvm::BranchInst::Create(Default, Default);
  }

  for (std::vector<llvm::SwitchInst*>::iterator i = IndirectSwitches.begin(),
         e = IndirectSwitches.end(); i != e; ++i) {
    llvm::SwitchInst *I = *i;

    I->setSuccessor(0, Default);
    for (std::map<const LabelStmt*,unsigned>::iterator LI = LabelIDs.begin(),
           LE = LabelIDs.end(); LI != LE; ++LI) {
      I->addCase(llvm::ConstantInt::get(llvm::Type::Int32Ty,
                                        LI->second),
                 getBasicBlockForLabel(LI->first));
    }
  }
}

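/// EmitVAArg - Generate code to extract the next argument of type Ty from the
/// va_list at VAListAddr.  This is currently hardcoded for 32-bit X86 and
/// returns 0 for other targets.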
llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty)
{
  // FIXME: This entire method is hardcoded for 32-bit X86.

  const char *TargetPrefix = getContext().Target.getTargetPrefix();

  if (strcmp(TargetPrefix, "x86") != 0 ||
      getContext().Target.getPointerWidth(0) != 32)
    return 0;

  const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
  const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Value *AddrTyped =
    Builder.CreateBitCast(Addr,
                          llvm::PointerType::getUnqual(ConvertType(Ty)));

  uint64_t SizeInBytes = getContext().getTypeSize(Ty) / 8;
  const unsigned ArgumentSizeInBytes = 4;
  if (SizeInBytes < ArgumentSizeInBytes)
    SizeInBytes = ArgumentSizeInBytes;

  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr,
                      llvm::ConstantInt::get(llvm::Type::Int32Ty, SizeInBytes),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}


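/// GetVLASize - Return the previously emitted size of the given variable
/// length array type.  EmitVLASize must already have been called for the type.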
llvm::Value *CodeGenFunction::GetVLASize(const VariableArrayType *VAT)
{
  llvm::Value *&SizeEntry = VLASizeMap[VAT];

  assert(SizeEntry && "Did not emit size for type");
  return SizeEntry;
}

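/// EmitVLASize - Emit and cache the size expressions of any variable length
/// arrays contained in the given variably modified type.  Returns the size
/// (in bytes) of Ty itself when Ty is a variable length array type, and 0
/// otherwise.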
llvm::Value *CodeGenFunction::EmitVLASize(QualType Ty)
{
  assert(Ty->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVLASizes!");

  if (const VariableArrayType *VAT = getContext().getAsVariableArrayType(Ty)) {
    llvm::Value *&SizeEntry = VLASizeMap[VAT];

    if (!SizeEntry) {
      // Get the element size.
      llvm::Value *ElemSize;

      QualType ElemTy = VAT->getElementType();

      const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());

      if (ElemTy->isVariableArrayType())
        ElemSize = EmitVLASize(ElemTy);
      else {
        ElemSize = llvm::ConstantInt::get(SizeTy,
                                          getContext().getTypeSize(ElemTy) / 8);
      }

      llvm::Value *NumElements = EmitScalarExpr(VAT->getSizeExpr());
      NumElements = Builder.CreateIntCast(NumElements, SizeTy, false, "tmp");

      SizeEntry = Builder.CreateMul(ElemSize, NumElements);
    }

    return SizeEntry;
  } else if (const PointerType *PT = Ty->getAsPointerType())
    EmitVLASize(PT->getPointeeType());
  else {
    assert(0 && "unknown VM type!");
  }

  return 0;
}

llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
  if (CGM.getContext().getBuiltinVaListType()->isArrayType()) {
    return EmitScalarExpr(E);
  }
  return EmitLValue(E).getAddress();
}

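/// PushCleanupBlock - Push a cleanup block onto the cleanup stack.  The block
/// is emitted, and any pending branch fixups are threaded through it, when the
/// enclosing scope is popped with PopCleanupBlock/EmitCleanupBlock.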
void CodeGenFunction::PushCleanupBlock(llvm::BasicBlock *CleanupBlock)
{
  CleanupEntries.push_back(CleanupEntry(CleanupBlock));
}

void CodeGenFunction::EmitCleanupBlocks(size_t OldCleanupStackSize)
{
  assert(CleanupEntries.size() >= OldCleanupStackSize &&
         "Cleanup stack mismatch!");

  while (CleanupEntries.size() > OldCleanupStackSize)
    EmitCleanupBlock();
}

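/// PopCleanupBlock - Pop the innermost cleanup entry, rewriting any branch
/// fixups that jump out of the scope so that they pass through the cleanup
/// block first, and return the blocks needed to emit the cleanup.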
CodeGenFunction::CleanupBlockInfo CodeGenFunction::PopCleanupBlock()
{
  CleanupEntry &CE = CleanupEntries.back();

  llvm::BasicBlock *CleanupBlock = CE.CleanupBlock;

  std::vector<llvm::BasicBlock *> Blocks;
  std::swap(Blocks, CE.Blocks);

  std::vector<llvm::BranchInst *> BranchFixups;
  std::swap(BranchFixups, CE.BranchFixups);

  CleanupEntries.pop_back();

  // Check if any branch fixups pointed to the scope we just popped. If so,
  // we can remove them.
  for (size_t i = 0, e = BranchFixups.size(); i != e; ++i) {
    llvm::BasicBlock *Dest = BranchFixups[i]->getSuccessor(0);
    BlockScopeMap::iterator I = BlockScopes.find(Dest);

    if (I == BlockScopes.end())
      continue;

    assert(I->second <= CleanupEntries.size() && "Invalid branch fixup!");

    if (I->second == CleanupEntries.size()) {
      // We don't need to do this branch fixup.
      BranchFixups[i] = BranchFixups.back();
      BranchFixups.pop_back();
      i--;
      e--;
      continue;
    }
  }

  llvm::BasicBlock *SwitchBlock = 0;
  llvm::BasicBlock *EndBlock = 0;
  if (!BranchFixups.empty()) {
    SwitchBlock = createBasicBlock("cleanup.switch");
    EndBlock = createBasicBlock("cleanup.end");

    llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

    Builder.SetInsertPoint(SwitchBlock);

    llvm::Value *DestCodePtr = CreateTempAlloca(llvm::Type::Int32Ty,
                                                "cleanup.dst");
    llvm::Value *DestCode = Builder.CreateLoad(DestCodePtr, "tmp");

    // Create a switch instruction to determine where to jump next.
    llvm::SwitchInst *SI = Builder.CreateSwitch(DestCode, EndBlock,
                                                BranchFixups.size());

    // Restore the current basic block (if any)
    if (CurBB)
      Builder.SetInsertPoint(CurBB);
    else
      Builder.ClearInsertionPoint();

    for (size_t i = 0, e = BranchFixups.size(); i != e; ++i) {
      llvm::BranchInst *BI = BranchFixups[i];
      llvm::BasicBlock *Dest = BI->getSuccessor(0);

      // Fixup the branch instruction to point to the cleanup block.
      BI->setSuccessor(0, CleanupBlock);

      if (CleanupEntries.empty()) {
        llvm::ConstantInt *ID;

        // Check if we already have a destination for this block.
        if (Dest == SI->getDefaultDest())
          ID = llvm::ConstantInt::get(llvm::Type::Int32Ty, 0);
        else {
          ID = SI->findCaseDest(Dest);
          if (!ID) {
            // No code found, get a new unique one by using the number of
            // switch successors.
            ID = llvm::ConstantInt::get(llvm::Type::Int32Ty,
                                        SI->getNumSuccessors());
            SI->addCase(ID, Dest);
          }
        }

        // Store the jump destination before the branch instruction.
        new llvm::StoreInst(ID, DestCodePtr, BI);
      } else {
        // We need to jump through another cleanup block. Create a pad block
        // with a branch instruction that jumps to the final destination and
        // add it as a branch fixup to the current cleanup scope.

        // Create the pad block.
        llvm::BasicBlock *CleanupPad = createBasicBlock("cleanup.pad", CurFn);

        // Create a unique case ID.
        llvm::ConstantInt *ID = llvm::ConstantInt::get(llvm::Type::Int32Ty,
                                                       SI->getNumSuccessors());

        // Store the jump destination before the branch instruction.
        new llvm::StoreInst(ID, DestCodePtr, BI);

        // Add it as the destination.
        SI->addCase(ID, CleanupPad);

        // Create the branch to the final destination.
        llvm::BranchInst *BI = llvm::BranchInst::Create(Dest);
        CleanupPad->getInstList().push_back(BI);

        // And add it as a branch fixup.
        CleanupEntries.back().BranchFixups.push_back(BI);
      }
    }
  }

  // Remove all blocks from the block scope map.
  for (size_t i = 0, e = Blocks.size(); i != e; ++i) {
    assert(BlockScopes.count(Blocks[i]) &&
           "Did not find block in scope map!");

    BlockScopes.erase(Blocks[i]);
  }

  return CleanupBlockInfo(CleanupBlock, SwitchBlock, EndBlock);
}

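/// EmitCleanupBlock - Pop the innermost cleanup scope and emit its cleanup
/// block, together with the fixup switch and end blocks if any branch fixups
/// were required.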
void CodeGenFunction::EmitCleanupBlock()
{
  CleanupBlockInfo Info = PopCleanupBlock();

  EmitBlock(Info.CleanupBlock);

  if (Info.SwitchBlock)
    EmitBlock(Info.SwitchBlock);
  if (Info.EndBlock)
    EmitBlock(Info.EndBlock);
}

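/// AddBranchFixup - Record a branch that jumps to a destination outside the
/// current cleanup scope; PopCleanupBlock will later rewrite it to run the
/// pending cleanups first.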
void CodeGenFunction::AddBranchFixup(llvm::BranchInst *BI)
{
  assert(!CleanupEntries.empty() &&
         "Trying to add branch fixup without cleanup block!");

  // FIXME: We could be more clever here and check if there's already a
  // branch fixup for this destination and recycle it.
  CleanupEntries.back().BranchFixups.push_back(BI);
}

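/// EmitBranchThroughCleanup - Emit a branch to Dest, registering it as a
/// branch fixup when the branch leaves one or more cleanup scopes whose
/// cleanups must run first.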
void CodeGenFunction::EmitBranchThroughCleanup(llvm::BasicBlock *Dest)
{
  if (!HaveInsertPoint())
    return;

  llvm::BranchInst* BI = Builder.CreateBr(Dest);

  Builder.ClearInsertionPoint();

  // The stack is empty, no need to do any cleanup.
  if (CleanupEntries.empty())
    return;

  if (!Dest->getParent()) {
    // We are trying to branch to a block that hasn't been inserted yet.
    AddBranchFixup(BI);
    return;
  }

  BlockScopeMap::iterator I = BlockScopes.find(Dest);
  if (I == BlockScopes.end()) {
    // We are trying to jump to a block that is outside of any cleanup scope.
    AddBranchFixup(BI);
    return;
  }

  assert(I->second < CleanupEntries.size() &&
         "Trying to branch into cleanup region");

  if (I->second == CleanupEntries.size() - 1) {
    // We have a branch to a block in the same scope.
    return;
  }

  AddBranchFixup(BI);
}
717