CodeGenFunction.cpp revision f1549f66a8216a78112286e3978cea2c29d6334c
1//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This coordinates the per-function state used while generating code.
11//
12//===----------------------------------------------------------------------===//
13
14#include "CodeGenFunction.h"
15#include "CodeGenModule.h"
16#include "CGDebugInfo.h"
17#include "CGException.h"
18#include "clang/Basic/TargetInfo.h"
19#include "clang/AST/APValue.h"
20#include "clang/AST/ASTContext.h"
21#include "clang/AST/Decl.h"
22#include "clang/AST/DeclCXX.h"
23#include "clang/AST/StmtCXX.h"
24#include "clang/Frontend/CodeGenOptions.h"
25#include "llvm/Target/TargetData.h"
26#include "llvm/Intrinsics.h"
27using namespace clang;
28using namespace CodeGen;
29
30CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
31  : BlockFunction(cgm, *this, Builder), CGM(cgm),
32    Target(CGM.getContext().Target),
33    Builder(cgm.getModule().getContext()),
34    ExceptionSlot(0), DebugInfo(0), IndirectBranch(0),
35    SwitchInsn(0), CaseRangeBlock(0), InvokeDest(0),
36    DidCallStackSave(false), UnreachableBlock(0),
37    CXXThisDecl(0), CXXThisValue(0), CXXVTTDecl(0), CXXVTTValue(0),
38    ConditionalBranchLevel(0), TerminateLandingPad(0), TerminateHandler(0),
39    TrapBB(0) {
40
41  // Get some frequently used types.
42  LLVMPointerWidth = Target.getPointerWidth(0);
43  llvm::LLVMContext &LLVMContext = CGM.getLLVMContext();
44  IntPtrTy = llvm::IntegerType::get(LLVMContext, LLVMPointerWidth);
45  Int32Ty  = llvm::Type::getInt32Ty(LLVMContext);
46  Int64Ty  = llvm::Type::getInt64Ty(LLVMContext);
47
48  Exceptions = getContext().getLangOptions().Exceptions;
49  CatchUndefined = getContext().getLangOptions().CatchUndefined;
50  CGM.getMangleContext().startNewFunction();
51}
52
53ASTContext &CodeGenFunction::getContext() const {
54  return CGM.getContext();
55}
56
57
58llvm::Value *CodeGenFunction::GetAddrOfLocalVar(const VarDecl *VD) {
59  llvm::Value *Res = LocalDeclMap[VD];
60  assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!");
61  return Res;
62}
63
64llvm::Constant *
65CodeGenFunction::GetAddrOfStaticLocalVar(const VarDecl *BVD) {
66  return cast<llvm::Constant>(GetAddrOfLocalVar(BVD));
67}
68
69const llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
70  return CGM.getTypes().ConvertTypeForMem(T);
71}
72
73const llvm::Type *CodeGenFunction::ConvertType(QualType T) {
74  return CGM.getTypes().ConvertType(T);
75}
76
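/// hasAggregateLLVMType - Roughly speaking, return true if values of this
/// type are handled indirectly (through memory) rather than as single LLVM
/// scalar values.  For example, 'struct S { int a, b; };' and 'int[4]' count
/// as aggregates here, while 'int' and 'int *' do not.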
77bool CodeGenFunction::hasAggregateLLVMType(QualType T) {
78  return T->isRecordType() || T->isArrayType() || T->isAnyComplexType() ||
79    T->isMemberFunctionPointerType();
80}
81
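/// EmitReturnBlock - Emit the unified return block, trying to fold it into
/// the current insertion block (or into its single predecessor) when that
/// yields simpler IR; otherwise emit it as a real block.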
82void CodeGenFunction::EmitReturnBlock() {
83  // For cleanliness, we try to avoid emitting the return block for
84  // simple cases.
85  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
86
87  if (CurBB) {
88    assert(!CurBB->getTerminator() && "Unexpected terminated block.");
89
90    // We have a valid insert point; reuse it if it is empty or there are no
91    // explicit jumps to the return block.
92    if (CurBB->empty() || ReturnBlock.Block->use_empty()) {
93      ReturnBlock.Block->replaceAllUsesWith(CurBB);
94      delete ReturnBlock.Block;
95    } else
96      EmitBlock(ReturnBlock.Block);
97    return;
98  }
99
100  // Otherwise, if the return block is the target of a single direct
101  // branch then we can just put the code in that block instead. This
102  // cleans up functions which started with a unified return block.
103  if (ReturnBlock.Block->hasOneUse()) {
104    llvm::BranchInst *BI =
105      dyn_cast<llvm::BranchInst>(*ReturnBlock.Block->use_begin());
106    if (BI && BI->isUnconditional() &&
107        BI->getSuccessor(0) == ReturnBlock.Block) {
108      // Reset insertion point and delete the branch.
109      Builder.SetInsertPoint(BI->getParent());
110      BI->eraseFromParent();
111      delete ReturnBlock.Block;
112      return;
113    }
114  }
115
116  // FIXME: We are at an unreachable point; there is no reason to emit the block
117  // unless it has uses. However, we still need a place to put the debug
118  // region.end for now.
119
120  EmitBlock(ReturnBlock.Block);
121}
122
123static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
124  if (!BB) return;
125  if (!BB->use_empty())
126    return CGF.CurFn->getBasicBlockList().push_back(BB);
127  delete BB;
128}
129
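/// FinishFunction - Complete IR generation for the function: emit the return
/// block and function epilog, close out debug info and the EH specification,
/// remove the alloca insertion-point marker, and emit any lazily created
/// blocks (terminate handlers, the indirect-goto block) that were used.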
130void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
131  assert(BreakContinueStack.empty() &&
132         "mismatched push/pop in break/continue stack!");
133
134  // Emit function epilog (to return).
135  EmitReturnBlock();
136
137  EmitFunctionInstrumentation("__cyg_profile_func_exit");
138
139  // Emit debug descriptor for function end.
140  if (CGDebugInfo *DI = getDebugInfo()) {
141    DI->setLocation(EndLoc);
142    DI->EmitRegionEnd(CurFn, Builder);
143  }
144
145  EmitFunctionEpilog(*CurFnInfo);
146  EmitEndEHSpec(CurCodeDecl);
147
148  assert(EHStack.empty() &&
149         "did not remove all scopes from cleanup stack!");
150
151  // If someone did an indirect goto, emit the indirect goto block at the end of
152  // the function.
153  if (IndirectBranch) {
154    EmitBlock(IndirectBranch->getParent());
155    Builder.ClearInsertionPoint();
156  }
157
158  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
159  llvm::Instruction *Ptr = AllocaInsertPt;
160  AllocaInsertPt = 0;
161  Ptr->eraseFromParent();
162
163  // If someone took the address of a label but never did an indirect goto, we
164  // made a zero-entry PHI node, which is illegal; zap it now.
165  if (IndirectBranch) {
166    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
167    if (PN->getNumIncomingValues() == 0) {
168      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
169      PN->eraseFromParent();
170    }
171  }
172
173  EmitIfUsed(*this, TerminateLandingPad);
174  EmitIfUsed(*this, TerminateHandler);
175  EmitIfUsed(*this, UnreachableBlock);
176}
177
178/// ShouldInstrumentFunction - Return true if the current function should be
179/// instrumented with __cyg_profile_func_* calls
180bool CodeGenFunction::ShouldInstrumentFunction() {
181  if (!CGM.getCodeGenOpts().InstrumentFunctions)
182    return false;
183  if (CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
184    return false;
185  return true;
186}
187
188/// EmitFunctionInstrumentation - Emit LLVM code to call the specified
189/// instrumentation function with the current function and the call site, if
190/// function instrumentation is enabled.
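///
/// With -finstrument-functions, the emitted code looks roughly like this
/// (illustrative only; actual value names vary):
///
///   %callsite = call i8* @llvm.returnaddress(i32 0)
///   call void @__cyg_profile_func_enter(i8* <this fn>, i8* %callsite)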
191void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
192  if (!ShouldInstrumentFunction())
193    return;
194
195  const llvm::PointerType *PointerTy;
196  const llvm::FunctionType *FunctionTy;
197  std::vector<const llvm::Type*> ProfileFuncArgs;
198
199  // void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
200  PointerTy = llvm::Type::getInt8PtrTy(VMContext);
201  ProfileFuncArgs.push_back(PointerTy);
202  ProfileFuncArgs.push_back(PointerTy);
203  FunctionTy = llvm::FunctionType::get(
204    llvm::Type::getVoidTy(VMContext),
205    ProfileFuncArgs, false);
206
207  llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
208  llvm::CallInst *CallSite = Builder.CreateCall(
209    CGM.getIntrinsic(llvm::Intrinsic::returnaddress, 0, 0),
210    llvm::ConstantInt::get(Int32Ty, 0),
211    "callsite");
212
213  Builder.CreateCall2(F,
214                      llvm::ConstantExpr::getBitCast(CurFn, PointerTy),
215                      CallSite);
216}
217
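/// StartFunction - Emit the standard function prologue: create the entry
/// block and the alloca insertion-point marker, set up the return value slot
/// and return block, emit debug info and instrumentation, and (for C++
/// instance methods) load the implicit 'this' and VTT parameters.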
218void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
219                                    llvm::Function *Fn,
220                                    const FunctionArgList &Args,
221                                    SourceLocation StartLoc) {
222  const Decl *D = GD.getDecl();
223
224  DidCallStackSave = false;
225  CurCodeDecl = CurFuncDecl = D;
226  FnRetTy = RetTy;
227  CurFn = Fn;
228  assert(CurFn->isDeclaration() && "Function already has body?");
229
230  // Pass inline keyword to optimizer if it appears explicitly on any
231  // declaration.
232  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
233    for (FunctionDecl::redecl_iterator RI = FD->redecls_begin(),
234           RE = FD->redecls_end(); RI != RE; ++RI)
235      if (RI->isInlineSpecified()) {
236        Fn->addFnAttr(llvm::Attribute::InlineHint);
237        break;
238      }
239
240  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
241
242  // Create a marker to make it easy to insert allocas into the entry block
243  // later.  Don't create this with the builder, because we don't want it
244  // folded.
245  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
246  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
247  if (Builder.isNamePreserving())
248    AllocaInsertPt->setName("allocapt");
249
250  ReturnBlock = getJumpDestInCurrentScope("return");
251
252  Builder.SetInsertPoint(EntryBB);
253
254  QualType FnType = getContext().getFunctionType(RetTy, 0, 0, false, 0,
255                                                 false, false, 0, 0,
256                                                 /*FIXME?*/
257                                                 FunctionType::ExtInfo());
258
259  // Emit subprogram debug descriptor.
260  if (CGDebugInfo *DI = getDebugInfo()) {
261    DI->setLocation(StartLoc);
262    DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
263  }
264
265  EmitFunctionInstrumentation("__cyg_profile_func_enter");
266
267  // FIXME: Leaked.
268  // CC info is ignored, hopefully?
269  CurFnInfo = &CGM.getTypes().getFunctionInfo(FnRetTy, Args,
270                                              FunctionType::ExtInfo());
271
272  if (RetTy->isVoidType()) {
273    // Void type; nothing to return.
274    ReturnValue = 0;
275  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
276             hasAggregateLLVMType(CurFnInfo->getReturnType())) {
277    // Indirect aggregate return; emit returned value directly into sret slot.
278    // This reduces code size, and affects correctness in C++.
279    ReturnValue = CurFn->arg_begin();
280  } else {
281    ReturnValue = CreateIRTemp(RetTy, "retval");
282  }
283
284  EmitStartEHSpec(CurCodeDecl);
285  EmitFunctionProlog(*CurFnInfo, CurFn, Args);
286
287  if (CXXThisDecl)
288    CXXThisValue = Builder.CreateLoad(LocalDeclMap[CXXThisDecl], "this");
289  if (CXXVTTDecl)
290    CXXVTTValue = Builder.CreateLoad(LocalDeclMap[CXXVTTDecl], "vtt");
291
292  // If any of the arguments have a variably modified type, make sure to
293  // emit the type size.
294  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
295       i != e; ++i) {
296    QualType Ty = i->second;
297
298    if (Ty->isVariablyModifiedType())
299      EmitVLASize(Ty);
300  }
301}
302
303void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args) {
304  const FunctionDecl *FD = cast<FunctionDecl>(CurGD.getDecl());
305  assert(FD->getBody());
306  EmitStmt(FD->getBody());
307}
308
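/// GenerateCode - Drive IR generation for a complete function definition:
/// build the argument list (including the implicit 'this' and VTT parameters
/// for C++ instance methods), emit the prologue, emit the body (dispatching
/// to the constructor/destructor paths where appropriate), and emit the
/// epilogue.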
309void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn) {
310  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
311
312  // Check if we should generate debug info for this function.
313  if (CGM.getDebugInfo() && !FD->hasAttr<NoDebugAttr>())
314    DebugInfo = CGM.getDebugInfo();
315
316  FunctionArgList Args;
317
318  CurGD = GD;
319  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
320    if (MD->isInstance()) {
321      // Create the implicit 'this' decl.
322      // FIXME: I'm not entirely sure I like using a fake decl just for code
323      // generation. Maybe we can come up with a better way?
324      CXXThisDecl = ImplicitParamDecl::Create(getContext(), 0,
325                                              FD->getLocation(),
326                                              &getContext().Idents.get("this"),
327                                              MD->getThisType(getContext()));
328      Args.push_back(std::make_pair(CXXThisDecl, CXXThisDecl->getType()));
329
330      // Check if we need a VTT parameter as well.
331      if (CodeGenVTables::needsVTTParameter(GD)) {
332        // FIXME: The comment about using a fake decl above applies here too.
333        QualType T = getContext().getPointerType(getContext().VoidPtrTy);
334        CXXVTTDecl =
335          ImplicitParamDecl::Create(getContext(), 0, FD->getLocation(),
336                                    &getContext().Idents.get("vtt"), T);
337        Args.push_back(std::make_pair(CXXVTTDecl, CXXVTTDecl->getType()));
338      }
339    }
340  }
341
342  if (FD->getNumParams()) {
343    const FunctionProtoType* FProto = FD->getType()->getAs<FunctionProtoType>();
344    assert(FProto && "Function def must have prototype!");
345
346    for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
347      Args.push_back(std::make_pair(FD->getParamDecl(i),
348                                    FProto->getArgType(i)));
349  }
350
351  SourceRange BodyRange;
352  if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange();
353
354  // Emit the standard function prologue.
355  StartFunction(GD, FD->getResultType(), Fn, Args, BodyRange.getBegin());
356
357  // Generate the body of the function.
358  if (isa<CXXDestructorDecl>(FD))
359    EmitDestructorBody(Args);
360  else if (isa<CXXConstructorDecl>(FD))
361    EmitConstructorBody(Args);
362  else
363    EmitFunctionBody(Args);
364
365  // Emit the standard function epilogue.
366  FinishFunction(BodyRange.getEnd());
367
368  // Destroy the 'this' declaration.
369  if (CXXThisDecl)
370    CXXThisDecl->Destroy(getContext());
371
372  // Destroy the VTT declaration.
373  if (CXXVTTDecl)
374    CXXVTTDecl->Destroy(getContext());
375}
376
377/// ContainsLabel - Return true if the statement contains a label.  If this
378/// statement is not executed normally, then the absence of a label means we
379/// can safely remove the code without changing behavior.
380bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
381  // Null statement, not a label!
382  if (S == 0) return false;
383
384  // If this is a label, we have to emit the code; consider something like:
385  // if (0) {  ...  foo:  bar(); }  goto foo;
386  if (isa<LabelStmt>(S))
387    return true;
388
389  // If this is a case/default statement, and we haven't seen a switch, we have
390  // to emit the code.
391  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
392    return true;
393
394  // If this is a switch statement, we want to ignore cases below it.
395  if (isa<SwitchStmt>(S))
396    IgnoreCaseStmts = true;
397
398  // Scan subexpressions for verboten labels.
399  for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
400       I != E; ++I)
401    if (ContainsLabel(*I, IgnoreCaseStmts))
402      return true;
403
404  return false;
405}
406
407
408/// ConstantFoldsToSimpleInteger - If the specified expression does not fold to
409/// a constant, or if it does but contains a label, return 0.  If it constant
410/// folds to 'true' and does not contain a label, return 1; if it constant folds
411/// to 'false' and does not contain a label, return -1.
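///
/// For example, the condition in 'if (2 > 1)' folds to true (return 1), the
/// condition in 'if (sizeof(int) == 0)' folds to false (return -1), and a
/// non-constant condition like 'if (x > 1)' returns 0.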
412int CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond) {
413  // FIXME: Rename and handle conversion of other evaluatable things
414  // to bool.
415  Expr::EvalResult Result;
416  if (!Cond->Evaluate(Result, getContext()) || !Result.Val.isInt() ||
417      Result.HasSideEffects)
418    return 0;  // Not foldable, not integer or not fully evaluatable.
419
420  if (CodeGenFunction::ContainsLabel(Cond))
421    return 0;  // Contains a label.
422
423  return Result.Val.getInt().getBoolValue() ? 1 : -1;
424}
425
426
427/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
428/// statement) to the specified blocks.  Based on the form of the condition,
429/// this tries to simplify the emitted control flow.
430///
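/// For a short-circuiting condition such as 'if (a && b)', the emitted
/// control flow looks roughly like (illustrative block names):
///
///   br i1 %a, label %land.lhs.true, label %if.end
/// land.lhs.true:
///   br i1 %b, label %if.then, label %if.end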
431void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
432                                           llvm::BasicBlock *TrueBlock,
433                                           llvm::BasicBlock *FalseBlock) {
434  if (const ParenExpr *PE = dyn_cast<ParenExpr>(Cond))
435    return EmitBranchOnBoolExpr(PE->getSubExpr(), TrueBlock, FalseBlock);
436
437  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
438    // Handle X && Y in a condition.
439    if (CondBOp->getOpcode() == BinaryOperator::LAnd) {
440      // If we have "1 && X", simplify the code.  "0 && X" would have constant
441      // folded if the case was simple enough.
442      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS()) == 1) {
443        // br(1 && X) -> br(X).
444        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
445      }
446
447      // If we have "X && 1", simplify the code to use an uncond branch.
448      // "X && 0" would have been constant folded to 0.
449      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS()) == 1) {
450        // br(X && 1) -> br(X).
451        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
452      }
453
454      // Emit the LHS as a conditional.  If the LHS conditional is false, we
455      // want to jump to the FalseBlock.
456      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
457      EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock);
458      EmitBlock(LHSTrue);
459
460      // Any temporaries created here are conditional.
461      BeginConditionalBranch();
462      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
463      EndConditionalBranch();
464
465      return;
466    } else if (CondBOp->getOpcode() == BinaryOperator::LOr) {
467      // If we have "0 || X", simplify the code.  "1 || X" would have constant
468      // folded if the case was simple enough.
469      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS()) == -1) {
470        // br(0 || X) -> br(X).
471        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
472      }
473
474      // If we have "X || 0", simplify the code to use an uncond branch.
475      // "X || 1" would have been constant folded to 1.
476      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS()) == -1) {
477        // br(X || 0) -> br(X).
478        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
479      }
480
481      // Emit the LHS as a conditional.  If the LHS conditional is true, we
482      // want to jump to the TrueBlock.
483      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
484      EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse);
485      EmitBlock(LHSFalse);
486
487      // Any temporaries created here are conditional.
488      BeginConditionalBranch();
489      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
490      EndConditionalBranch();
491
492      return;
493    }
494  }
495
496  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
497    // br(!x, t, f) -> br(x, f, t)
498    if (CondUOp->getOpcode() == UnaryOperator::LNot)
499      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock);
500  }
501
502  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
503    // Handle ?: operator.
504
505    // Just ignore GNU ?: extension.
506    if (CondOp->getLHS()) {
507      // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
508      llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
509      llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
510      EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock);
511      EmitBlock(LHSBlock);
512      EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock);
513      EmitBlock(RHSBlock);
514      EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock);
515      return;
516    }
517  }
518
519  // Emit the code with the fully general case.
520  llvm::Value *CondV = EvaluateExprAsBool(Cond);
521  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock);
522}
523
524/// ErrorUnsupported - Print out an error that codegen doesn't support the
525/// specified stmt yet.
526void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type,
527                                       bool OmitOnError) {
528  CGM.ErrorUnsupported(S, Type, OmitOnError);
529}
530
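/// EmitNullInitialization - Zero-initialize an object of the given type in
/// memory.  Note that "null" is not always an all-zero bit pattern: under the
/// common C++ ABI a null pointer to data member is represented as -1, so such
/// types are initialized by copying from a null constant instead of memset.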
531void
532CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
533  // If the type contains a pointer to data member we can't memset it to zero.
534  // Instead, create a null constant and copy it to the destination.
535  if (CGM.getTypes().ContainsPointerToDataMember(Ty)) {
536    llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
537
538    llvm::GlobalVariable *NullVariable =
539      new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
540                               /*isConstant=*/true,
541                               llvm::GlobalVariable::PrivateLinkage,
542                               NullConstant, llvm::Twine());
543    EmitAggregateCopy(DestPtr, NullVariable, Ty, /*isVolatile=*/false);
544    return;
545  }
546
547
548  // Ignore empty classes in C++.
549  if (getContext().getLangOptions().CPlusPlus) {
550    if (const RecordType *RT = Ty->getAs<RecordType>()) {
551      if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
552        return;
553    }
554  }
555
556  // Otherwise, just memset the whole thing to zero.  This is legal
557  // because in LLVM, all default initializers (other than the ones we just
558  // handled above) are guaranteed to have a bit pattern of all zeros.
559  const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
560  if (DestPtr->getType() != BP)
561    DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");
562
563  // Get size and alignment info for this aggregate.
564  std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);
565
566  // Don't bother emitting a zero-byte memset.
567  if (TypeInfo.first == 0)
568    return;
569
570  // FIXME: Handle variable sized types.
571  Builder.CreateCall5(CGM.getMemSetFn(BP, IntPtrTy), DestPtr,
572                 llvm::Constant::getNullValue(llvm::Type::getInt8Ty(VMContext)),
573                      // TypeInfo.first describes size in bits.
574                      llvm::ConstantInt::get(IntPtrTy, TypeInfo.first/8),
575                      llvm::ConstantInt::get(Int32Ty, TypeInfo.second/8),
576                      llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext),
577                                             0));
578}
579
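/// GetAddrOfLabel - Return the address of a label, as used by the GNU
/// address-of-label extension, e.g.:
///
///   void *dest = &&target;
///   goto *dest;
///
/// Every address-taken label becomes a destination of the function's single
/// indirectbr instruction.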
580llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelStmt *L) {
581  // Make sure that there is a block for the indirect goto.
582  if (IndirectBranch == 0)
583    GetIndirectGotoBlock();
584
585  llvm::BasicBlock *BB = getJumpDestForLabel(L).Block;
586
587  // Make sure the indirect branch includes all of the address-taken blocks.
588  IndirectBranch->addDestination(BB);
589  return llvm::BlockAddress::get(CurFn, BB);
590}
591
592llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
593  // If we already made the indirect branch for indirect goto, return its block.
594  if (IndirectBranch) return IndirectBranch->getParent();
595
596  CGBuilderTy TmpBuilder(createBasicBlock("indirectgoto"));
597
598  const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
599
600  // Create the PHI node that indirect gotos will add entries to.
601  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, "indirect.goto.dest");
602
603  // Create the indirect branch instruction.
604  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
605  return IndirectBranch->getParent();
606}
607
608llvm::Value *CodeGenFunction::GetVLASize(const VariableArrayType *VAT) {
609  llvm::Value *&SizeEntry = VLASizeMap[VAT->getSizeExpr()];
610
611  assert(SizeEntry && "Did not emit size for type");
612  return SizeEntry;
613}
614
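/// EmitVLASize - Emit and cache the size, in bytes, of a variably modified
/// type.  For example, given
///
///   void f(int n) { int a[n][4]; ... }
///
/// the size of 'a' is computed at run time as roughly n * (4 * sizeof(int)),
/// and the result is remembered in VLASizeMap keyed by the size expression.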
615llvm::Value *CodeGenFunction::EmitVLASize(QualType Ty) {
616  assert(Ty->isVariablyModifiedType() &&
617         "Must pass variably modified type to EmitVLASizes!");
618
619  EnsureInsertPoint();
620
621  if (const VariableArrayType *VAT = getContext().getAsVariableArrayType(Ty)) {
622    llvm::Value *&SizeEntry = VLASizeMap[VAT->getSizeExpr()];
623
624    if (!SizeEntry) {
625      const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
626
627      // Get the element size.
628      QualType ElemTy = VAT->getElementType();
629      llvm::Value *ElemSize;
630      if (ElemTy->isVariableArrayType())
631        ElemSize = EmitVLASize(ElemTy);
632      else
633        ElemSize = llvm::ConstantInt::get(SizeTy,
634            getContext().getTypeSizeInChars(ElemTy).getQuantity());
635
636      llvm::Value *NumElements = EmitScalarExpr(VAT->getSizeExpr());
637      NumElements = Builder.CreateIntCast(NumElements, SizeTy, false, "tmp");
638
639      SizeEntry = Builder.CreateMul(ElemSize, NumElements);
640    }
641
642    return SizeEntry;
643  }
644
645  if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
646    EmitVLASize(AT->getElementType());
647    return 0;
648  }
649
650  const PointerType *PT = Ty->getAs<PointerType>();
651  assert(PT && "unknown VM type!");
652  EmitVLASize(PT->getPointeeType());
653  return 0;
654}
655
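/// EmitVAListRef - Emit a va_list argument as an address.  On targets where
/// va_list is an array type (e.g. x86-64), the expression decays to a pointer
/// and is emitted as a scalar; otherwise the lvalue's address is used.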
656llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
657  if (CGM.getContext().getBuiltinVaListType()->isArrayType())
658    return EmitScalarExpr(E);
659  return EmitLValue(E).getAddress();
660}
661
662/// Pops cleanup blocks until the given savepoint is reached.
663void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) {
664  assert(Old.isValid());
665
666  EHScopeStack::iterator E = EHStack.find(Old);
667  while (EHStack.begin() != E)
668    PopCleanupBlock();
669}
670
671/// Destroys a cleanup if it was unused.
672static void DestroyCleanup(CodeGenFunction &CGF,
673                           llvm::BasicBlock *Entry,
674                           llvm::BasicBlock *Exit) {
675  assert(Entry->use_empty() && "destroying cleanup with uses!");
676  assert(Exit->getTerminator() == 0 &&
677         "exit has terminator but entry has no predecessors!");
678
679  // This doesn't always remove the entire cleanup, but it's much
680  // safer as long as we don't know what blocks belong to the cleanup.
681  // A *much* better approach if we care about this inefficiency would
682  // be to lazily emit the cleanup.
683
684  // If the exit block is distinct from the entry, give it a branch to
685  // an unreachable destination.  This preserves the well-formedness
686  // of the IR.
687  if (Entry != Exit)
688    llvm::BranchInst::Create(CGF.getUnreachableBlock(), Exit);
689
690  assert(!Entry->getParent() && "cleanup entry already positioned?");
691  delete Entry;
692}
693
694/// Creates a switch instruction to thread branches out of the given
695/// block (which is the exit block of a cleanup).
696static void CreateCleanupSwitch(CodeGenFunction &CGF,
697                                llvm::BasicBlock *Block) {
698  if (Block->getTerminator()) {
699    assert(isa<llvm::SwitchInst>(Block->getTerminator()) &&
700           "cleanup block already has a terminator, but it isn't a switch");
701    return;
702  }
703
704  llvm::Value *DestCodePtr
705    = CGF.CreateTempAlloca(CGF.Builder.getInt32Ty(), "cleanup.dst");
706  CGBuilderTy Builder(Block);
707  llvm::Value *DestCode = Builder.CreateLoad(DestCodePtr, "tmp");
708
709  // Create a switch instruction to determine where to jump next.
710  Builder.CreateSwitch(DestCode, CGF.getUnreachableBlock());
711}
712
713/// Attempts to reduce a cleanup's entry block to a fallthrough.  This
714/// is basically llvm::MergeBlockIntoPredecessor, except
715/// simplified/optimized for the tighter constraints on cleanup
716/// blocks.
717static void SimplifyCleanupEntry(CodeGenFunction &CGF,
718                                 llvm::BasicBlock *Entry) {
719  llvm::BasicBlock *Pred = Entry->getSinglePredecessor();
720  if (!Pred) return;
721
722  llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator());
723  if (!Br || Br->isConditional()) return;
724  assert(Br->getSuccessor(0) == Entry);
725
726  // If we were previously inserting at the end of the cleanup entry
727  // block, we'll need to continue inserting at the end of the
728  // predecessor.
729  bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry;
730  assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end());
731
732  // Kill the branch.
733  Br->eraseFromParent();
734
735  // Merge the blocks.
736  Pred->getInstList().splice(Pred->end(), Entry->getInstList());
737
738  // Kill the entry block.
739  Entry->eraseFromParent();
740
741  if (WasInsertBlock)
742    CGF.Builder.SetInsertPoint(Pred);
743}
744
745/// Attempts to reduce a cleanup's exit switch to an unconditional
746/// branch.
747static void SimplifyCleanupExit(llvm::BasicBlock *Exit) {
748  llvm::TerminatorInst *Terminator = Exit->getTerminator();
749  assert(Terminator && "completed cleanup exit has no terminator");
750
751  llvm::SwitchInst *Switch = dyn_cast<llvm::SwitchInst>(Terminator);
752  if (!Switch) return;
753  if (Switch->getNumCases() != 2) return; // default + 1
754
755  llvm::LoadInst *Cond = cast<llvm::LoadInst>(Switch->getCondition());
756  llvm::AllocaInst *CondVar = cast<llvm::AllocaInst>(Cond->getPointerOperand());
757
758  // Replace the switch instruction with an unconditional branch.
759  llvm::BasicBlock *Dest = Switch->getSuccessor(1); // default is 0
760  Switch->eraseFromParent();
761  llvm::BranchInst::Create(Dest, Exit);
762
763  // Delete all uses of the condition variable.
764  Cond->eraseFromParent();
765  while (!CondVar->use_empty())
766    cast<llvm::StoreInst>(*CondVar->use_begin())->eraseFromParent();
767
768  // Delete the condition variable itself.
769  CondVar->eraseFromParent();
770}
771
772/// Threads a branch fixup through a cleanup block.
773static void ThreadFixupThroughCleanup(CodeGenFunction &CGF,
774                                      BranchFixup &Fixup,
775                                      llvm::BasicBlock *Entry,
776                                      llvm::BasicBlock *Exit) {
777  if (!Exit->getTerminator())
778    CreateCleanupSwitch(CGF, Exit);
779
780  // Find the switch and its destination index alloca.
781  llvm::SwitchInst *Switch = cast<llvm::SwitchInst>(Exit->getTerminator());
782  llvm::Value *DestCodePtr =
783    cast<llvm::LoadInst>(Switch->getCondition())->getPointerOperand();
784
785  // Compute the index of the new case we're adding to the switch.
786  unsigned Index = Switch->getNumCases();
787
788  const llvm::IntegerType *i32 = llvm::Type::getInt32Ty(CGF.getLLVMContext());
789  llvm::ConstantInt *IndexV = llvm::ConstantInt::get(i32, Index);
790
791  // Set the index in the origin block.
792  new llvm::StoreInst(IndexV, DestCodePtr, Fixup.Origin);
793
794  // Add a case to the switch.
795  Switch->addCase(IndexV, Fixup.Destination);
796
797  // Change the last branch to point to the cleanup entry block.
798  Fixup.LatestBranch->setSuccessor(Fixup.LatestBranchIndex, Entry);
799
800  // And finally, update the fixup.
801  Fixup.LatestBranch = Switch;
802  Fixup.LatestBranchIndex = Index;
803}
804
805/// Try to simplify both the entry and exit edges of a cleanup.
806static void SimplifyCleanupEdges(CodeGenFunction &CGF,
807                                 llvm::BasicBlock *Entry,
808                                 llvm::BasicBlock *Exit) {
809
810  // Given their current implementations, it's important to run these
811  // in this order: SimplifyCleanupEntry will delete Entry if it can
812  // be merged into its predecessor, which will then break
813  // SimplifyCleanupExit if (as is common) Entry == Exit.
814
815  SimplifyCleanupExit(Exit);
816  SimplifyCleanupEntry(CGF, Entry);
817}
818
819/// Pops a cleanup block.  If the block includes a normal cleanup, the
820/// current insertion point is threaded through the cleanup, as are
821/// any branch fixups on the cleanup.
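///
/// A "branch fixup" records a branch (e.g. from a goto, break, or return)
/// that was emitted while this cleanup was active and whose ultimate
/// destination lies outside it; each such branch must be rethreaded so that
/// it executes the cleanup code on the way out, which is done here by routing
/// it through the cleanup's exit switch.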
822void CodeGenFunction::PopCleanupBlock() {
823  assert(!EHStack.empty() && "cleanup stack is empty!");
824  assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
825  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
826  assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());
827
828  // Handle the EH cleanup if (1) there is one and (2) it's different
829  // from the normal cleanup.
830  if (Scope.isEHCleanup() &&
831      Scope.getEHEntry() != Scope.getNormalEntry()) {
832    llvm::BasicBlock *EHEntry = Scope.getEHEntry();
833    llvm::BasicBlock *EHExit = Scope.getEHExit();
834
835    if (EHEntry->use_empty()) {
836      DestroyCleanup(*this, EHEntry, EHExit);
837    } else {
838      // TODO: this isn't really the ideal location to put this EH
839      // cleanup, but lazy emission is a better solution than trying
840      // to pick a better spot.
841      CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
842      EmitBlock(EHEntry);
843      Builder.restoreIP(SavedIP);
844
845      SimplifyCleanupEdges(*this, EHEntry, EHExit);
846    }
847  }
848
849  // If we only have an EH cleanup, we don't really need to do much
850  // here.  Branch fixups just naturally drop down to the enclosing
851  // cleanup scope.
852  if (!Scope.isNormalCleanup()) {
853    EHStack.popCleanup();
854    assert(EHStack.getNumBranchFixups() == 0 || EHStack.hasNormalCleanups());
855    return;
856  }
857
858  // Check whether the scope has any fixups that need to be threaded.
859  unsigned FixupDepth = Scope.getFixupDepth();
860  bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth;
861
862  // Grab the entry and exit blocks.
863  llvm::BasicBlock *Entry = Scope.getNormalEntry();
864  llvm::BasicBlock *Exit = Scope.getNormalExit();
865
866  // Check whether anything's been threaded through the cleanup already.
867  assert((Exit->getTerminator() == 0) == Entry->use_empty() &&
868         "cleanup entry/exit mismatch");
869  bool HasExistingBranches = !Entry->use_empty();
870
871  // Check whether we need to emit a "fallthrough" branch through the
872  // cleanup for the current insertion point.
873  llvm::BasicBlock *FallThrough = Builder.GetInsertBlock();
874  if (FallThrough && FallThrough->getTerminator())
875    FallThrough = 0;
876
877  // If *nothing* is using the cleanup, kill it.
878  if (!FallThrough && !HasFixups && !HasExistingBranches) {
879    EHStack.popCleanup();
880    DestroyCleanup(*this, Entry, Exit);
881    return;
882  }
883
884  // Otherwise, add the block to the function.
885  EmitBlock(Entry);
886
887  if (FallThrough)
888    Builder.SetInsertPoint(Exit);
889  else
890    Builder.ClearInsertionPoint();
891
892  // Fast case: if we don't have to add any fixups, and either
893  // we don't have a fallthrough or the cleanup wasn't previously
894  // used, then the setup above is sufficient.
895  if (!HasFixups) {
896    if (!FallThrough) {
897      assert(HasExistingBranches && "no reason for cleanup but didn't kill before");
898      EHStack.popCleanup();
899      SimplifyCleanupEdges(*this, Entry, Exit);
900      return;
901    } else if (!HasExistingBranches) {
902      assert(FallThrough && "no reason for cleanup but didn't kill before");
903      // We can't simplify the exit edge in this case because we're
904      // already inserting at the end of the exit block.
905      EHStack.popCleanup();
906      SimplifyCleanupEntry(*this, Entry);
907      return;
908    }
909  }
910
911  // Otherwise we're going to have to thread things through the cleanup.
912  llvm::SmallVector<BranchFixup*, 8> Fixups;
913
914  // Synthesize a fixup for the current insertion point.
915  BranchFixup Cur;
916  if (FallThrough) {
917    Cur.Destination = createBasicBlock("cleanup.cont");
918    Cur.LatestBranch = FallThrough->getTerminator();
919    Cur.LatestBranchIndex = 0;
920    Cur.Origin = Cur.LatestBranch;
921
922    // Restore fixup invariant.  EmitBlock added a branch to the cleanup
923    // which we need to redirect to the destination.
924    cast<llvm::BranchInst>(Cur.LatestBranch)->setSuccessor(0, Cur.Destination);
925
926    Fixups.push_back(&Cur);
927  } else {
928    Cur.Destination = 0;
929  }
930
931  // Collect any "real" fixups we need to thread.
932  for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
933        I != E; ++I)
934    if (EHStack.getBranchFixup(I).Destination)
935      Fixups.push_back(&EHStack.getBranchFixup(I));
936
937  assert(!Fixups.empty() && "no fixups, invariants broken!");
938
939  // If there's only a single fixup to thread through, do so with
940  // unconditional branches.  This only happens if there's a single
941  // branch and no fallthrough.
942  if (Fixups.size() == 1 && !HasExistingBranches) {
943    Fixups[0]->LatestBranch->setSuccessor(Fixups[0]->LatestBranchIndex, Entry);
944    llvm::BranchInst *Br =
945      llvm::BranchInst::Create(Fixups[0]->Destination, Exit);
946    Fixups[0]->LatestBranch = Br;
947    Fixups[0]->LatestBranchIndex = 0;
948
949  // Otherwise, force a switch statement and thread everything through
950  // the switch.
951  } else {
952    CreateCleanupSwitch(*this, Exit);
953    for (unsigned I = 0, E = Fixups.size(); I != E; ++I)
954      ThreadFixupThroughCleanup(*this, *Fixups[I], Entry, Exit);
955  }
956
957  // Emit the fallthrough destination block if necessary.
958  if (Cur.Destination)
959    EmitBlock(Cur.Destination);
960
961  // We're finally done with the cleanup.
962  EHStack.popCleanup();
963}
964
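/// EmitBranchThroughCleanup - Emit a branch from the current insert point to
/// the given destination (e.g. for 'goto', 'break', or 'return'), building a
/// branch fixup and threading it through any enclosing normal cleanup scopes
/// (or recording it as pending when the destination scope is not yet known).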
965void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
966  if (!HaveInsertPoint())
967    return;
968
969  // Create the branch.
970  llvm::BranchInst *BI = Builder.CreateBr(Dest.Block);
971
972  // If we're not in a cleanup scope, we don't need to worry about
973  // fixups.
974  if (!EHStack.hasNormalCleanups()) {
975    Builder.ClearInsertionPoint();
976    return;
977  }
978
979  // Initialize a fixup.
980  BranchFixup Fixup;
981  Fixup.Destination = Dest.Block;
982  Fixup.Origin = BI;
983  Fixup.LatestBranch = BI;
984  Fixup.LatestBranchIndex = 0;
985
986  // If we can't resolve the destination cleanup scope, just add this
987  // to the current cleanup scope.
988  if (!Dest.ScopeDepth.isValid()) {
989    EHStack.addBranchFixup() = Fixup;
990    Builder.ClearInsertionPoint();
991    return;
992  }
993
994  for (EHScopeStack::iterator I = EHStack.begin(),
995         E = EHStack.find(Dest.ScopeDepth); I != E; ++I) {
996    if (isa<EHCleanupScope>(*I)) {
997      EHCleanupScope &Scope = cast<EHCleanupScope>(*I);
998      if (Scope.isNormalCleanup())
999        ThreadFixupThroughCleanup(*this, Fixup, Scope.getNormalEntry(),
1000                                  Scope.getNormalExit());
1001    }
1002  }
1003
1004  Builder.ClearInsertionPoint();
1005}
1006
1007void CodeGenFunction::EmitBranchThroughEHCleanup(JumpDest Dest) {
1008  if (!HaveInsertPoint())
1009    return;
1010
1011  // Create the branch.
1012  llvm::BranchInst *BI = Builder.CreateBr(Dest.Block);
1013
1014  // If we're not in a cleanup scope, we don't need to worry about
1015  // fixups.
1016  if (!EHStack.hasEHCleanups()) {
1017    Builder.ClearInsertionPoint();
1018    return;
1019  }
1020
1021  // Initialize a fixup.
1022  BranchFixup Fixup;
1023  Fixup.Destination = Dest.Block;
1024  Fixup.Origin = BI;
1025  Fixup.LatestBranch = BI;
1026  Fixup.LatestBranchIndex = 0;
1027
1028  // We should never get invalid scope depths for these: invalid scope
1029  // depths only arise for as-yet-unemitted labels, and we can't do an
1030  // EH-unwind to one of those.
1031  assert(Dest.ScopeDepth.isValid() && "invalid scope depth on EH dest?");
1032
1033  for (EHScopeStack::iterator I = EHStack.begin(),
1034         E = EHStack.find(Dest.ScopeDepth); I != E; ++I) {
1035    if (isa<EHCleanupScope>(*I)) {
1036      EHCleanupScope &Scope = cast<EHCleanupScope>(*I);
1037      if (Scope.isEHCleanup())
1038        ThreadFixupThroughCleanup(*this, Fixup, Scope.getEHEntry(),
1039                                  Scope.getEHExit());
1040    }
1041  }
1042
1043  Builder.ClearInsertionPoint();
1044}
1045