//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Operator.h"
using namespace clang;
using namespace CodeGen;

CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
    : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
      Builder(cgm.getModule().getContext(), llvm::ConstantFolder(),
              CGBuilderInserterTy(this)), CapturedStmtInfo(nullptr),
      SanOpts(&CGM.getLangOpts().Sanitize), AutoreleaseResult(false),
      BlockInfo(nullptr), BlockPointer(nullptr),
      LambdaThisCaptureField(nullptr), NormalCleanupDest(nullptr),
      NextCleanupDestIndex(1),
      FirstBlockInfo(nullptr), EHResumeBlock(nullptr), ExceptionSlot(nullptr),
      EHSelectorSlot(nullptr), DebugInfo(CGM.getModuleDebugInfo()),
      DisableDebugInfo(false), DidCallStackSave(false), IndirectBranch(nullptr),
      PGO(cgm), SwitchInsn(nullptr), SwitchWeights(nullptr),
      CaseRangeBlock(nullptr), UnreachableBlock(nullptr), NumReturnExprs(0),
      NumSimpleReturnExprs(0), CXXABIThisDecl(nullptr),
      CXXABIThisValue(nullptr), CXXThisValue(nullptr),
      CXXDefaultInitExprThis(nullptr), CXXStructorImplicitParamDecl(nullptr),
      CXXStructorImplicitParamValue(nullptr), OutermostConditional(nullptr),
      CurLexicalScope(nullptr), TerminateLandingPad(nullptr),
      TerminateHandler(nullptr), TrapBB(nullptr) {
  if (!suppressNewContext)
    CGM.getCXXABI().getMangleContext().startNewFunction();

  llvm::FastMathFlags FMF;
  if (CGM.getLangOpts().FastMath)
    FMF.setUnsafeAlgebra();
  if (CGM.getLangOpts().FiniteMathOnly) {
    FMF.setNoNaNs();
    FMF.setNoInfs();
  }
  Builder.SetFastMathFlags(FMF);
}

CodeGenFunction::~CodeGenFunction() {
  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");

  // If there are any unclaimed block infos, go ahead and destroy them
  // now.  This can happen if IR-gen gets clever and skips evaluating
  // something.
  if (FirstBlockInfo)
    destroyBlockInfos(FirstBlockInfo);

  if (getLangOpts().OpenMP) {
    CGM.getOpenMPRuntime().FunctionFinished(*this);
  }
}

llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

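/// getEvaluationKind - Return how the given type should be evaluated during
/// IR generation: as a scalar, as a complex value, or as an aggregate in
/// memory.  Atomic types are classified by their underlying value type.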
TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
      llvm_unreachable("undeduced auto type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
      return TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
      return TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}

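/// EmitReturnBlock - Emit the unified return block, trying to avoid a
/// separate block when the current insertion point or the block's single
/// unconditional predecessor can be reused instead.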
void CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return;
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
      dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Reset insertion point, including debug location, and delete the
      // branch.  This is really subtle and only works because the next change
      // in location will hit the caching in CGDebugInfo::EmitLocation and not
      // override this.
      Builder.SetCurrentDebugLocation(BI->getDebugLoc());
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      return;
    }
  }

  // FIXME: We are at an unreachable point, there is no reason to emit the block
  // unless it has uses. However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock.getBlock());
}

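/// EmitIfUsed - Append the given block to the current function if it has any
/// uses; otherwise just delete it.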
static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty())
    return CGF.CurFn->getBasicBlockList().push_back(BB);
  delete BB;
}

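/// FinishFunction - Complete IR generation for the current function body:
/// pop any remaining cleanups, emit the return block and function epilog,
/// and emit or delete the helper blocks (indirect goto, EH resume,
/// terminate) depending on whether they were actually used.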
void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
    && NumSimpleReturnExprs == NumReturnExprs
    && ReturnBlock.getBlock()->use_empty();
  // Usually the return expression is evaluated before the cleanup
  // code.  If the function contains only a simple return statement,
  // such as a constant, the location before the cleanup code becomes
  // the last useful breakpoint in the function, because the simple
  // return expression will be evaluated after the cleanup code. To be
  // safe, set the debug location for cleanup code to the location of
  // the return statement.  Otherwise the cleanup code should be at the
  // end of the function's lexical scope.
  //
  // If there are multiple branches to the return block, the branch
  // instructions will get the location of the return statements and
  // all will be fine.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (OnlySimpleReturnStmts)
      DI->EmitLocation(Builder, LastStopPoint);
    else
      DI->EmitLocation(Builder, EndLoc);
  }

  // Pop any cleanups that might have been associated with the
  // parameters.  Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  bool EmitRetDbgLoc = true;
  if (EHStack.stable_begin() != PrologueCleanupDepth) {
    PopCleanupBlocks(PrologueCleanupDepth);

    // Make sure the line table doesn't jump back into the body for
    // the ret after it's been at EndLoc.
    EmitRetDbgLoc = false;

    if (CGDebugInfo *DI = getDebugInfo())
      if (OnlySimpleReturnStmts)
        DI->EmitLocation(Builder, EndLoc);
  }

  // Emit function epilog (to return).
  EmitReturnBlock();

  if (ShouldInstrumentFunction())
    EmitFunctionInstrumentation("__cyg_profile_func_exit");

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo()) {
    DI->EmitFunctionEnd(Builder);
  }

  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end of
  // the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = nullptr;
  Ptr->eraseFromParent();

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero entry PHI node, which is illegal, zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, EHResumeBlock);
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();

  for (SmallVectorImpl<std::pair<llvm::Instruction *, llvm::Value *> >::iterator
           I = DeferredReplacements.begin(),
           E = DeferredReplacements.end();
       I != E; ++I) {
    I->first->replaceAllUsesWith(I->second);
    I->first->eraseFromParent();
  }
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions)
    return false;
  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

/// EmitFunctionInstrumentation - Emit LLVM code to call the specified
/// instrumentation function with the current function and the call site, if
/// function instrumentation is enabled.
void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
  // void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
  llvm::PointerType *PointerTy = Int8PtrTy;
  llvm::Type *ProfileFuncArgs[] = { PointerTy, PointerTy };
  llvm::FunctionType *FunctionTy =
    llvm::FunctionType::get(VoidTy, ProfileFuncArgs, false);

  llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
  llvm::CallInst *CallSite = Builder.CreateCall(
    CGM.getIntrinsic(llvm::Intrinsic::returnaddress),
    llvm::ConstantInt::get(Int32Ty, 0),
    "callsite");

  llvm::Value *args[] = {
    llvm::ConstantExpr::getBitCast(CurFn, PointerTy),
    CallSite
  };

  EmitNounwindRuntimeCall(F, args);
}

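/// EmitMCountInstrumentation - Emit a call to the target-specific mcount-like
/// profiling function, as used by -pg.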
void CodeGenFunction::EmitMCountInstrumentation() {
  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);

  llvm::Constant *MCountFn =
    CGM.CreateRuntimeFunction(FTy, getTarget().getMCountName());
  EmitNounwindRuntimeCall(MCountFn);
}

// OpenCL v1.2 s5.6.4.6 allows the compiler to store kernel argument
// information in the program executable. The argument information stored
// includes the argument name, its type, the address and access qualifiers used.
static void GenOpenCLArgMetadata(const FunctionDecl *FD, llvm::Function *Fn,
                                 CodeGenModule &CGM, llvm::LLVMContext &Context,
                                 SmallVector <llvm::Value*, 5> &kernelMDArgs,
                                 CGBuilderTy& Builder, ASTContext &ASTCtx) {
  // Create MDNodes that represent the kernel arg metadata.
  // Each MDNode is a list of the form "key" followed by N values, where N
  // is the number of kernel arguments.

  const PrintingPolicy &Policy = ASTCtx.getPrintingPolicy();

  // MDNode for the kernel argument address space qualifiers.
  SmallVector<llvm::Value*, 8> addressQuals;
  addressQuals.push_back(llvm::MDString::get(Context, "kernel_arg_addr_space"));

  // MDNode for the kernel argument access qualifiers (images only).
  SmallVector<llvm::Value*, 8> accessQuals;
  accessQuals.push_back(llvm::MDString::get(Context, "kernel_arg_access_qual"));

  // MDNode for the kernel argument type names.
  SmallVector<llvm::Value*, 8> argTypeNames;
  argTypeNames.push_back(llvm::MDString::get(Context, "kernel_arg_type"));

  // MDNode for the kernel argument type qualifiers.
  SmallVector<llvm::Value*, 8> argTypeQuals;
  argTypeQuals.push_back(llvm::MDString::get(Context, "kernel_arg_type_qual"));

  // MDNode for the kernel argument names.
  SmallVector<llvm::Value*, 8> argNames;
  argNames.push_back(llvm::MDString::get(Context, "kernel_arg_name"));

  for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
    const ParmVarDecl *parm = FD->getParamDecl(i);
    QualType ty = parm->getType();
    std::string typeQuals;

    if (ty->isPointerType()) {
      QualType pointeeTy = ty->getPointeeType();

      // Get address qualifier.
      addressQuals.push_back(Builder.getInt32(ASTCtx.getTargetAddressSpace(
        pointeeTy.getAddressSpace())));

      // Get argument type name.
      std::string typeName =
          pointeeTy.getUnqualifiedType().getAsString(Policy) + "*";

      // Turn "unsigned type" to "utype"
      std::string::size_type pos = typeName.find("unsigned");
      if (pos != std::string::npos)
        typeName.erase(pos+1, 8);

      argTypeNames.push_back(llvm::MDString::get(Context, typeName));

      // Get argument type qualifiers:
      if (ty.isRestrictQualified())
        typeQuals = "restrict";
      if (pointeeTy.isConstQualified() ||
          (pointeeTy.getAddressSpace() == LangAS::opencl_constant))
        typeQuals += typeQuals.empty() ? "const" : " const";
      if (pointeeTy.isVolatileQualified())
        typeQuals += typeQuals.empty() ? "volatile" : " volatile";
    } else {
      uint32_t AddrSpc = 0;
      if (ty->isImageType())
        AddrSpc =
          CGM.getContext().getTargetAddressSpace(LangAS::opencl_global);

      addressQuals.push_back(Builder.getInt32(AddrSpc));

      // Get argument type name.
      std::string typeName = ty.getUnqualifiedType().getAsString(Policy);

      // Turn "unsigned type" to "utype"
      std::string::size_type pos = typeName.find("unsigned");
      if (pos != std::string::npos)
        typeName.erase(pos+1, 8);

      argTypeNames.push_back(llvm::MDString::get(Context, typeName));

      // Get argument type qualifiers:
      if (ty.isConstQualified())
        typeQuals = "const";
      if (ty.isVolatileQualified())
        typeQuals += typeQuals.empty() ? "volatile" : " volatile";
    }

    argTypeQuals.push_back(llvm::MDString::get(Context, typeQuals));

    // Get image access qualifier:
    if (ty->isImageType()) {
      const OpenCLImageAccessAttr *A = parm->getAttr<OpenCLImageAccessAttr>();
      if (A && A->isWriteOnly())
        accessQuals.push_back(llvm::MDString::get(Context, "write_only"));
      else
        accessQuals.push_back(llvm::MDString::get(Context, "read_only"));
      // FIXME: what about read_write?
    } else
      accessQuals.push_back(llvm::MDString::get(Context, "none"));

    // Get argument name.
    argNames.push_back(llvm::MDString::get(Context, parm->getName()));
  }

  kernelMDArgs.push_back(llvm::MDNode::get(Context, addressQuals));
  kernelMDArgs.push_back(llvm::MDNode::get(Context, accessQuals));
  kernelMDArgs.push_back(llvm::MDNode::get(Context, argTypeNames));
  kernelMDArgs.push_back(llvm::MDNode::get(Context, argTypeQuals));
  kernelMDArgs.push_back(llvm::MDNode::get(Context, argNames));
}

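/// EmitOpenCLKernelMetadata - If the given function is an OpenCL kernel, add
/// an entry for it to the module-level "opencl.kernels" metadata, including
/// argument information (when enabled) and any vec_type_hint,
/// work_group_size_hint, and reqd_work_group_size attributes.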
void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
                                               llvm::Function *Fn)
{
  if (!FD->hasAttr<OpenCLKernelAttr>())
    return;

  llvm::LLVMContext &Context = getLLVMContext();

  SmallVector <llvm::Value*, 5> kernelMDArgs;
  kernelMDArgs.push_back(Fn);

  if (CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
    GenOpenCLArgMetadata(FD, Fn, CGM, Context, kernelMDArgs,
                         Builder, getContext());

  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
    QualType hintQTy = A->getTypeHint();
    const ExtVectorType *hintEltQTy = hintQTy->getAs<ExtVectorType>();
    bool isSignedInteger =
        hintQTy->isSignedIntegerType() ||
        (hintEltQTy && hintEltQTy->getElementType()->isSignedIntegerType());
    llvm::Value *attrMDArgs[] = {
      llvm::MDString::get(Context, "vec_type_hint"),
      llvm::UndefValue::get(CGM.getTypes().ConvertType(A->getTypeHint())),
      llvm::ConstantInt::get(
          llvm::IntegerType::get(Context, 32),
          llvm::APInt(32, (uint64_t)(isSignedInteger ? 1 : 0)))
    };
    kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
  }

  if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
    llvm::Value *attrMDArgs[] = {
      llvm::MDString::get(Context, "work_group_size_hint"),
      Builder.getInt32(A->getXDim()),
      Builder.getInt32(A->getYDim()),
      Builder.getInt32(A->getZDim())
    };
    kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
  }

  if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
    llvm::Value *attrMDArgs[] = {
      llvm::MDString::get(Context, "reqd_work_group_size"),
      Builder.getInt32(A->getXDim()),
      Builder.getInt32(A->getYDim()),
      Builder.getInt32(A->getZDim())
    };
    kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
  }

  llvm::MDNode *kernelMDNode = llvm::MDNode::get(Context, kernelMDArgs);
  llvm::NamedMDNode *OpenCLKernelMetadata =
    CGM.getModule().getOrInsertNamedMetadata("opencl.kernels");
  OpenCLKernelMetadata->addOperand(kernelMDNode);
}

/// Determine whether the function F ends with a return stmt.
static bool endsWithReturn(const Decl* F) {
  const Stmt *Body = nullptr;
  if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
    Body = FD->getBody();
  else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
    Body = OMD->getBody();

  if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
    auto LastStmt = CS->body_rbegin();
    if (LastStmt != CS->body_rend())
      return isa<ReturnStmt>(*LastStmt);
  }
  return false;
}

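/// StartFunction - Emit the standard prologue for the function: set up the
/// per-function state, create the entry block and the alloca insertion
/// point, emit debug and instrumentation hooks, set up the return value
/// slot, and run the ABI- and language-specific prologue code.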
void CodeGenFunction::StartFunction(GlobalDecl GD,
                                    QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation Loc,
                                    SourceLocation StartLoc) {
  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = D;
  CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  if (CGM.getSanitizerBlacklist().isIn(*Fn))
    SanOpts = &SanitizerOptions::Disabled;

  // Pass the inline keyword to the optimizer if it appears explicitly on any
  // declaration. Also, in the case of -fno-inline, attach the NoInline
  // attribute to all functions that are not marked AlwaysInline.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    if (!CGM.getCodeGenOpts().NoInline) {
      for (auto RI : FD->redecls())
        if (RI->isInlineSpecified()) {
          Fn->addFnAttr(llvm::Attribute::InlineHint);
          break;
        }
    } else if (!FD->hasAttr<AlwaysInlineAttr>())
      Fn->addFnAttr(llvm::Attribute::NoInline);
  }

  if (getLangOpts().OpenCL) {
    // Add metadata for a kernel function.
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      EmitOpenCLKernelMetadata(FD, Fn);
  }

  // If we are checking function types, emit a function type signature as
  // prefix data.
  if (getLangOpts().CPlusPlus && SanOpts->Function) {
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      if (llvm::Constant *PrefixSig =
              CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
        llvm::Constant *FTRTTIConst =
            CGM.GetAddrOfRTTIDescriptor(FD->getType(), /*ForEH=*/true);
        llvm::Constant *PrefixStructElems[] = { PrefixSig, FTRTTIConst };
        llvm::Constant *PrefixStructConst =
            llvm::ConstantStruct::getAnon(PrefixStructElems, /*Packed=*/true);
        Fn->setPrefixData(PrefixStructConst);
      }
    }
  }

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entry block
  // later.  Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
  if (Builder.isNamePreserving())
    AllocaInsertPt->setName("allocapt");

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    SmallVector<QualType, 16> ArgTypes;
    for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
         i != e; ++i) {
      ArgTypes.push_back((*i)->getType());
    }

    QualType FnType =
      getContext().getFunctionType(RetTy, ArgTypes,
                                   FunctionProtoType::ExtProtoInfo());
    DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, Builder);
  }

  if (ShouldInstrumentFunction())
    EmitFunctionInstrumentation("__cyg_profile_func_enter");

  if (CGM.getCodeGenOpts().InstrumentForProfiling)
    EmitMCountInstrumentation();

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = nullptr;

    // Count the implicit return.
    if (!endsWithReturn(D))
      ++NumReturnExprs;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Indirect aggregate return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    auto AI = CurFn->arg_begin();
    if (CurFnInfo->getReturnInfo().isSRetAfterThis())
      ++AI;
    ReturnValue = AI;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Load the sret pointer from the argument struct and return into that.
    unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
    llvm::Function::arg_iterator EI = CurFn->arg_end();
    --EI;
    llvm::Value *Addr = Builder.CreateStructGEP(EI, Idx);
    ReturnValue = Builder.CreateLoad(Addr, "agg.result");
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");

    // Tell the epilog emitter to autorelease the result.  We do this
    // now so that various specialized functions can suppress it
    // during their IR-generation.
    if (getLangOpts().ObjCAutoRefCount &&
        !CurFnInfo->isReturnsRetained() &&
        RetTy->isObjCRetainableType())
      AutoreleaseResult = true;
  }

  EmitStartEHSpec(CurCodeDecl);

  PrologueCleanupDepth = EHStack.stable_begin();
  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
    CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
    const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
    if (MD->getParent()->isLambda() &&
        MD->getOverloadedOperator() == OO_Call) {
      // We're in a lambda; figure out the captures.
      MD->getParent()->getCaptureFields(LambdaCaptureFields,
                                        LambdaThisCaptureField);
      if (LambdaThisCaptureField) {
        // If this lambda captures this, load it.
        LValue ThisLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
        CXXThisValue = EmitLoadOfLValue(ThisLValue,
                                        SourceLocation()).getScalarVal();
      }
    } else {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'?  The
      // fast register allocator would be happier...
      CXXThisValue = CXXABIThisValue;
    }
  }

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    const VarDecl *VD = *i;

    // Dig out the type as written from ParmVarDecls; it's unclear whether
    // the standard (C99 6.9.1p10) requires this, but we're following the
    // precedent set by gcc.
    QualType Ty;
    if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
      Ty = PVD->getOriginalType();
    else
      Ty = VD->getType();

    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
  }
  // Emit a location at the end of the prologue.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, StartLoc);
}

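/// EmitFunctionBody - Emit the statements that make up the body of the
/// function, starting the PGO region for the body first.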
void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args,
                                       const Stmt *Body) {
  RegionCounter Cnt = getPGORegionCounter(Body);
  Cnt.beginRegion(Builder);
  if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
    EmitCompoundStmtWithoutScope(*S);
  else
    EmitStmt(Body);
}

/// When instrumenting to collect profile data, the counts for some blocks
/// such as switch cases need to not include the fall-through counts, so
/// emit a branch around the instrumentation code. When not instrumenting,
/// this just calls EmitBlock().
void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
                                               RegionCounter &Cnt) {
  llvm::BasicBlock *SkipCountBB = nullptr;
  if (HaveInsertPoint() && CGM.getCodeGenOpts().ProfileInstrGenerate) {
    // When instrumenting for profiling, the fallthrough to certain
    // statements needs to skip over the instrumentation code so that we
    // get an accurate count.
    SkipCountBB = createBasicBlock("skipcount");
    EmitBranch(SkipCountBB);
  }
  EmitBlock(BB);
  Cnt.beginRegion(Builder, /*AddIncomingFallThrough=*/true);
  if (SkipCountBB)
    EmitBlock(SkipCountBB);
}

/// Tries to mark the given function nounwind based on the
/// non-existence of any throwing calls within it.  We believe this is
/// lightweight enough to do at -O0.
static void TryMarkNoThrow(llvm::Function *F) {
  // LLVM treats 'nounwind' on a function as part of the type, so we
  // can't do this on functions that can be overwritten.
  if (F->mayBeOverridden()) return;

  for (llvm::Function::iterator FI = F->begin(), FE = F->end(); FI != FE; ++FI)
    for (llvm::BasicBlock::iterator
           BI = FI->begin(), BE = FI->end(); BI != BE; ++BI)
      if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(&*BI)) {
        if (!Call->doesNotThrow())
          return;
      } else if (isa<llvm::ResumeInst>(&*BI)) {
        return;
      }
  F->setDoesNotThrow();
}

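/// EmitSizedDeallocationFunction - Emit a weak, discardable definition of a
/// sized deallocation function that simply forwards its first argument to the
/// corresponding unsized deallocation function.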
static void EmitSizedDeallocationFunction(CodeGenFunction &CGF,
                                          const FunctionDecl *UnsizedDealloc) {
  // This is a weak discardable definition of the sized deallocation function.
  CGF.CurFn->setLinkage(llvm::Function::LinkOnceAnyLinkage);

  // Call the unsized deallocation function and forward the first argument
  // unchanged.
  llvm::Constant *Unsized = CGF.CGM.GetAddrOfFunction(UnsizedDealloc);
  CGF.Builder.CreateCall(Unsized, &*CGF.CurFn->arg_begin());
}

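/// GenerateCode - Emit LLVM IR for the given function declaration: build the
/// argument list, emit the prologue, dispatch to the appropriate body
/// emitter (constructors, destructors, lambda helpers, CUDA stubs, or a
/// plain statement body), and emit the epilogue.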
void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
                                   const CGFunctionInfo &FnInfo) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  // Check if we should generate debug info for this function.
  if (FD->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  FunctionArgList Args;
  QualType ResTy = FD->getReturnType();

  CurGD = GD;
  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
  if (MD && MD->isInstance()) {
    if (CGM.getCXXABI().HasThisReturn(GD))
      ResTy = MD->getThisType(getContext());
    CGM.getCXXABI().buildThisParam(*this, Args);
  }

  for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
    Args.push_back(FD->getParamDecl(i));

  if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
    CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);

  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange();
  CurEHLocation = BodyRange.getEnd();

  // Use the location of the start of the function to determine where
  // the function definition is located. By default use the location
  // of the declaration as the location for the subprogram. A function
  // may lack a declaration in the source code if it is created by code
  // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
  SourceLocation Loc = FD->getLocation();

  // If this is a function specialization then use the pattern body
  // as the location for the function.
  if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
    if (SpecDecl->hasBody(SpecDecl))
      Loc = SpecDecl->getLocation();

  // Emit the standard function prologue.
  StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());

  // Generate the body of the function.
  PGO.assignRegionCounters(GD.getDecl(), CurFn);
  if (isa<CXXDestructorDecl>(FD))
    EmitDestructorBody(Args);
  else if (isa<CXXConstructorDecl>(FD))
    EmitConstructorBody(Args);
  else if (getLangOpts().CUDA &&
           !CGM.getCodeGenOpts().CUDAIsDevice &&
           FD->hasAttr<CUDAGlobalAttr>())
    CGM.getCUDARuntime().EmitDeviceStubBody(*this, Args);
  else if (isa<CXXConversionDecl>(FD) &&
           cast<CXXConversionDecl>(FD)->isLambdaToBlockPointerConversion()) {
    // The lambda conversion to block pointer is special; the semantics can't be
    // expressed in the AST, so IRGen needs to special-case it.
    EmitLambdaToBlockPointerBody(Args);
  } else if (isa<CXXMethodDecl>(FD) &&
             cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
    // The lambda static invoker function is special, because it forwards or
    // clones the body of the function call operator (but is actually static).
    EmitLambdaStaticInvokeFunction(cast<CXXMethodDecl>(FD));
  } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
             (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
              cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
    // Implicit copy-assignment gets the same special treatment as implicit
    // copy-constructors.
    emitImplicitAssignmentOperatorBody(Args);
  } else if (Stmt *Body = FD->getBody()) {
    EmitFunctionBody(Args, Body);
  } else if (FunctionDecl *UnsizedDealloc =
                 FD->getCorrespondingUnsizedGlobalDeallocationFunction()) {
    // Global sized deallocation functions get an implicit weak definition if
    // they don't have an explicit definition.
    EmitSizedDeallocationFunction(*this, UnsizedDealloc);
  } else
    llvm_unreachable("no definition for emitted function");

  // C++11 [stmt.return]p2:
  //   Flowing off the end of a function [...] results in undefined behavior in
  //   a value-returning function.
  // C11 6.9.1p12:
  //   If the '}' that terminates a function is reached, and the value of the
  //   function call is used by the caller, the behavior is undefined.
  if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() &&
      !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
    if (SanOpts->Return)
      EmitCheck(Builder.getFalse(), "missing_return",
                EmitCheckSourceLocation(FD->getLocation()),
                ArrayRef<llvm::Value *>(), CRK_Unrecoverable);
    else if (CGM.getCodeGenOpts().OptimizationLevel == 0)
      Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::trap));
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the standard function epilogue.
  FinishFunction(BodyRange.getEnd());

  // If we haven't marked the function nothrow through other means, do
  // a quick pass now to see if we can.
  if (!CurFn->doesNotThrow())
    TryMarkNoThrow(CurFn);

  PGO.emitInstrumentationData();
  PGO.destroyRegionCounters();
}

/// ContainsLabel - Return true if the statement contains a label in it.  If
/// this statement is not executed normally and it does not contain a label,
/// we can just remove the code.
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
  // Null statement, not a label!
  if (!S) return false;

  // If this is a label, we have to emit the code, consider something like:
  // if (0) {  ...  foo:  bar(); }  goto foo;
  //
  // TODO: If anyone cared, we could track __label__'s, since we know that you
  // can't jump to one from outside their declared region.
  if (isa<LabelStmt>(S))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we have
  // to emit the code.
  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore cases below it.
  if (isa<SwitchStmt>(S))
    IgnoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  for (Stmt::const_child_range I = S->children(); I; ++I)
    if (ContainsLabel(*I, IgnoreCaseStmts))
      return true;

  return false;
}

/// containsBreak - Return true if the statement contains a break out of it.
/// If the statement (recursively) contains a switch or loop with a break
/// inside of it, this is fine.
bool CodeGenFunction::containsBreak(const Stmt *S) {
  // Null statement, not a break!
  if (!S) return false;

  // If this is a switch or loop that defines its own break scope, then we can
  // include it and anything inside of it.
  if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
      isa<ForStmt>(S))
    return false;

  if (isa<BreakStmt>(S))
    return true;

  // Scan subexpressions for verboten breaks.
  for (Stmt::const_child_range I = S->children(); I; ++I)
    if (containsBreak(*I))
      return true;

  return false;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false.  If it
/// constant folds, return true and set the boolean result in ResultBool.
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
                                                   bool &ResultBool) {
  llvm::APSInt ResultInt;
  if (!ConstantFoldsToSimpleInteger(Cond, ResultInt))
    return false;

  ResultBool = ResultInt.getBoolValue();
  return true;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false.  If it
/// constant folds return true and set the folded value.
bool CodeGenFunction::
ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &ResultInt) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  llvm::APSInt Int;
  if (!Cond->EvaluateAsInt(Int, getContext()))
    return false;  // Not foldable, not integer or not fully evaluatable.

  if (CodeGenFunction::ContainsLabel(Cond))
    return false;  // Contains a label.

  ResultInt = Int;
  return true;
}

/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
/// statement) to the specified blocks.  Based on the condition, this might try
/// to simplify the codegen of the conditional based on the branch.
///
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
                                           llvm::BasicBlock *TrueBlock,
                                           llvm::BasicBlock *FalseBlock,
                                           uint64_t TrueCount) {
  Cond = Cond->IgnoreParens();

  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {

    // Handle X && Y in a condition.
    if (CondBOp->getOpcode() == BO_LAnd) {
      RegionCounter Cnt = getPGORegionCounter(CondBOp);

      // If we have "1 && X", simplify the code.  "0 && X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          ConstantBool) {
        // br(1 && X) -> br(X).
        Cnt.beginRegion(Builder);
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // If we have "X && 1", simplify the code to use an uncond branch.
      // "X && 0" would have been constant folded to 0.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          ConstantBool) {
        // br(X && 1) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is false, we
      // want to jump to the FalseBlock.
      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
      // The counter tells us how often we evaluate RHS, and all of TrueCount
      // can be propagated to that branch.
      uint64_t RHSCount = Cnt.getCount();

      ConditionalEvaluation eval(*this);
      EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount);
      EmitBlock(LHSTrue);

      // Any temporaries created here are conditional.
      Cnt.beginRegion(Builder);
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount);
      eval.end(*this);

      return;
    }

    if (CondBOp->getOpcode() == BO_LOr) {
      RegionCounter Cnt = getPGORegionCounter(CondBOp);

      // If we have "0 || X", simplify the code.  "1 || X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          !ConstantBool) {
        // br(0 || X) -> br(X).
        Cnt.beginRegion(Builder);
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // If we have "X || 0", simplify the code to use an uncond branch.
      // "X || 1" would have been constant folded to 1.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          !ConstantBool) {
        // br(X || 0) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is true, we
      // want to jump to the TrueBlock.
      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
      // We have the count for entry to the RHS and for the whole expression
      // being true, so we can divvy up TrueCount between the short circuit and
      // the RHS.
      uint64_t LHSCount = Cnt.getParentCount() - Cnt.getCount();
      uint64_t RHSCount = TrueCount - LHSCount;

      ConditionalEvaluation eval(*this);
      EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount);
      EmitBlock(LHSFalse);

      // Any temporaries created here are conditional.
      Cnt.beginRegion(Builder);
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, RHSCount);

      eval.end(*this);

      return;
    }
  }

  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
    // br(!x, t, f) -> br(x, f, t)
    if (CondUOp->getOpcode() == UO_LNot) {
      // Negate the count.
      uint64_t FalseCount = PGO.getCurrentRegionCount() - TrueCount;
      // Negate the condition and swap the destination blocks.
      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
                                  FalseCount);
    }
  }

  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
    // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
    llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
    llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");

    RegionCounter Cnt = getPGORegionCounter(CondOp);
    ConditionalEvaluation cond(*this);
    EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock, Cnt.getCount());

    // When computing PGO branch weights, we only know the overall count for
    // the true block. This code is essentially doing tail duplication of the
    // naive code-gen, introducing new edges for which counts are not
    // available. Divide the counts proportionally between the LHS and RHS of
    // the conditional operator.
    uint64_t LHSScaledTrueCount = 0;
    if (TrueCount) {
      double LHSRatio = Cnt.getCount() / (double) Cnt.getParentCount();
      LHSScaledTrueCount = TrueCount * LHSRatio;
    }

    cond.begin(*this);
    EmitBlock(LHSBlock);
    Cnt.beginRegion(Builder);
    EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
                         LHSScaledTrueCount);
    cond.end(*this);

    cond.begin(*this);
    EmitBlock(RHSBlock);
    EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
                         TrueCount - LHSScaledTrueCount);
    cond.end(*this);

    return;
  }

  if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
    // Conditional operator handling can give us a throw expression as a
    // condition for a case like:
    //   br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f))
    // Fold this to:
    //   br(c, throw x, br(y, t, f))
    EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
    return;
  }

  // Create branch weights based on the number of times we get here and the
  // number of times the condition should be true.
  uint64_t CurrentCount = std::max(PGO.getCurrentRegionCount(), TrueCount);
  llvm::MDNode *Weights = PGO.createBranchWeights(TrueCount,
                                                  CurrentCount - TrueCount);

  // Emit the code with the fully general case.
  llvm::Value *CondV = EvaluateExprAsBool(Cond);
  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights);
}

/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
  CGM.ErrorUnsupported(S, Type);
}

/// emitNonZeroVLAInit - Emit the "zero" initialization of a
/// variable-length array whose elements have a non-zero bit-pattern.
///
/// \param baseType the inner-most element type of the array
/// \param src - a char* pointing to the bit-pattern for a single
/// base element of the array
/// \param sizeInChars - the total size of the VLA, in chars
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
                               llvm::Value *dest, llvm::Value *src,
                               llvm::Value *sizeInChars) {
  std::pair<CharUnits,CharUnits> baseSizeAndAlign
    = CGF.getContext().getTypeInfoInChars(baseType);

  CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *baseSizeInChars
    = llvm::ConstantInt::get(CGF.IntPtrTy, baseSizeAndAlign.first.getQuantity());

  llvm::Type *i8p = Builder.getInt8PtrTy();

  llvm::Value *begin = Builder.CreateBitCast(dest, i8p, "vla.begin");
  llvm::Value *end = Builder.CreateInBoundsGEP(dest, sizeInChars, "vla.end");

  llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
  llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");

  // Make a loop over the VLA.  C99 guarantees that the VLA element
  // count must be nonzero.
  CGF.EmitBlock(loopBB);

  llvm::PHINode *cur = Builder.CreatePHI(i8p, 2, "vla.cur");
  cur->addIncoming(begin, originBB);

  // memcpy the individual element bit-pattern.
  Builder.CreateMemCpy(cur, src, baseSizeInChars,
                       baseSizeAndAlign.second.getQuantity(),
                       /*volatile*/ false);

  // Go to the next element.
  llvm::Value *next = Builder.CreateConstInBoundsGEP1_32(cur, 1, "vla.next");

  // Leave if that's the end of the VLA.
  llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
  Builder.CreateCondBr(done, contBB, loopBB);
  cur->addIncoming(next, loopBB);

  CGF.EmitBlock(contBB);
}

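/// EmitNullInitialization - Emit the "default zero" initialization of an
/// object of the given type at the given address, using a memset of zero
/// where possible and a copy from a null constant for types (such as ones
/// containing pointers to data members) whose null value is not all zeros.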
void
CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
  // Ignore empty classes in C++.
  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
        return;
    }
  }

  // Cast the dest ptr to the appropriate i8 pointer type.
  unsigned DestAS =
    cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
  llvm::Type *BP = Builder.getInt8PtrTy(DestAS);
  if (DestPtr->getType() != BP)
    DestPtr = Builder.CreateBitCast(DestPtr, BP);

  // Get size and alignment info for this aggregate.
  std::pair<CharUnits, CharUnits> TypeInfo =
    getContext().getTypeInfoInChars(Ty);
  CharUnits Size = TypeInfo.first;
  CharUnits Align = TypeInfo.second;

  llvm::Value *SizeVal;
  const VariableArrayType *vla;

  // Don't bother emitting a zero-byte memset.
  if (Size.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (const VariableArrayType *vlaType =
          dyn_cast_or_null<VariableArrayType>(
                                          getContext().getAsArrayType(Ty))) {
      QualType eltType;
      llvm::Value *numElts;
      std::tie(numElts, eltType) = getVLASize(vlaType);

      SizeVal = numElts;
      CharUnits eltSize = getContext().getTypeSizeInChars(eltType);
      if (!eltSize.isOne())
        SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
      vla = vlaType;
    } else {
      return;
    }
  } else {
    SizeVal = CGM.getSize(Size);
    vla = nullptr;
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  if (!CGM.getTypes().isZeroInitializable(Ty)) {
    // For a VLA, emit a single element, then splat that over the VLA.
    if (vla) Ty = getContext().getBaseElementType(vla);

    llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);

    llvm::GlobalVariable *NullVariable =
      new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
                               /*isConstant=*/true,
                               llvm::GlobalVariable::PrivateLinkage,
                               NullConstant, Twine());
    llvm::Value *SrcPtr =
      Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy());

    if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);

    // Get and call the appropriate llvm.memcpy overload.
    Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity(), false);
    return;
  }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal,
                       Align.getQuantity(), false);
}

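/// GetAddrOfLabel - Return the llvm::BlockAddress of the block for the given
/// label, registering it as a destination of the function's indirect branch.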
llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
  // Make sure that there is a block for the indirect goto.
  if (!IndirectBranch)
    GetIndirectGotoBlock();

  llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();

  // Make sure the indirect branch includes all of the address-taken blocks.
  IndirectBranch->addDestination(BB);
  return llvm::BlockAddress::get(CurFn, BB);
}

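/// GetIndirectGotoBlock - Lazily create and return the single block holding
/// the indirectbr instruction that all indirect gotos in the function share.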
1278llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
1279  // If we already made the indirect branch for indirect goto, return its block.
1280  if (IndirectBranch) return IndirectBranch->getParent();
1281
1282  CGBuilderTy TmpBuilder(createBasicBlock("indirectgoto"));
1283
1284  // Create the PHI node that indirect gotos will add entries to.
1285  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
1286                                              "indirect.goto.dest");
1287
1288  // Create the indirect branch instruction.
1289  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
1290  return IndirectBranch->getParent();
1291}
1292
1293/// Computes the length of an array in elements, as well as the base
1294/// element type and a properly-typed first element pointer.
1295llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
1296                                              QualType &baseType,
1297                                              llvm::Value *&addr) {
1298  const ArrayType *arrayType = origArrayType;
1299
1300  // If it's a VLA, we have to load the stored size.  Note that
1301  // this is the size of the VLA in bytes, not its size in elements.
1302  llvm::Value *numVLAElements = nullptr;
1303  if (isa<VariableArrayType>(arrayType)) {
1304    numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).first;
1305
1306    // Walk into all VLAs.  This doesn't require changes to addr,
1307    // which has type T* where T is the first non-VLA element type.
1308    do {
1309      QualType elementType = arrayType->getElementType();
1310      arrayType = getContext().getAsArrayType(elementType);
1311
1312      // If we only have VLA components, 'addr' requires no adjustment.
1313      if (!arrayType) {
1314        baseType = elementType;
1315        return numVLAElements;
1316      }
1317    } while (isa<VariableArrayType>(arrayType));
1318
1319    // We get out here only if we find a constant array type
1320    // inside the VLA.
1321  }
1322
1323  // We have some number of constant-length arrays, so addr should
1324  // have LLVM type [M x [N x [...]]]*.  Build a GEP that walks
1325  // down to the first element of addr.
1326  SmallVector<llvm::Value*, 8> gepIndices;
1327
1328  // GEP down to the array type.
1329  llvm::ConstantInt *zero = Builder.getInt32(0);
1330  gepIndices.push_back(zero);
1331
1332  uint64_t countFromCLAs = 1;
1333  QualType eltType;
1334
1335  llvm::ArrayType *llvmArrayType =
1336    dyn_cast<llvm::ArrayType>(
1337      cast<llvm::PointerType>(addr->getType())->getElementType());
1338  while (llvmArrayType) {
1339    assert(isa<ConstantArrayType>(arrayType));
1340    assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
1341             == llvmArrayType->getNumElements());
1342
1343    gepIndices.push_back(zero);
1344    countFromCLAs *= llvmArrayType->getNumElements();
1345    eltType = arrayType->getElementType();
1346
1347    llvmArrayType =
1348      dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
1349    arrayType = getContext().getAsArrayType(arrayType->getElementType());
1350    assert((!llvmArrayType || arrayType) &&
1351           "LLVM and Clang types are out-of-synch");
  }

  if (arrayType) {
    // From this point onwards, the Clang array type has been emitted
    // as some other type (probably a packed struct). Compute the array
    // size, and just emit the 'begin' expression as a bitcast.
    while (arrayType) {
      countFromCLAs *=
          cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
      eltType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(eltType);
    }

    unsigned AddressSpace = addr->getType()->getPointerAddressSpace();
    llvm::Type *BaseType = ConvertType(eltType)->getPointerTo(AddressSpace);
    addr = Builder.CreateBitCast(addr, BaseType, "array.begin");
  } else {
    // Create the actual GEP.
    addr = Builder.CreateInBoundsGEP(addr, gepIndices, "array.begin");
  }

  baseType = eltType;

  llvm::Value *numElements
    = llvm::ConstantInt::get(SizeTy, countFromCLAs);

  // If we had any VLA dimensions, factor them in.
  if (numVLAElements)
    numElements = Builder.CreateNUWMul(numVLAElements, numElements);

  return numElements;
}
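// Rough worked example (illustrative, not from the original source): for a
// local declared as 'float a[n][4][8]', the VLA walk yields
// numVLAElements == n, the constant-array loop contributes 4 * 8 == 32, and
// the function returns n * 32 with baseType == float and addr rewritten to a
// float* pointing at the first element.  A hypothetical caller:
//
//   QualType baseType;
//   llvm::Value *begin = arrayAddr;
//   llvm::Value *length =
//       CGF.emitArrayLength(CGF.getContext().getAsArrayType(arrayQT),
//                           baseType, begin);
//   // 'length' is a size_t element count; 'begin' now points at baseType.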

std::pair<llvm::Value*, QualType>
CodeGenFunction::getVLASize(QualType type) {
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
  assert(vla && "type was not a variable array type!");
  return getVLASize(vla);
}

std::pair<llvm::Value*, QualType>
CodeGenFunction::getVLASize(const VariableArrayType *type) {
  // The number of elements so far; always size_t.
  llvm::Value *numElements = nullptr;

  QualType elementType;
  do {
    elementType = type->getElementType();
    llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
    assert(vlaSize && "no size for VLA!");
    assert(vlaSize->getType() == SizeTy);

    if (!numElements) {
      numElements = vlaSize;
    } else {
      // It's undefined behavior if this wraps around, so mark it that way.
      // FIXME: Teach -fsanitize=undefined to trap this.
      numElements = Builder.CreateNUWMul(numElements, vlaSize);
    }
  } while ((type = getContext().getAsVariableArrayType(elementType)));

  return std::pair<llvm::Value*,QualType>(numElements, elementType);
}
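// Illustrative sketch (not part of the original source): for a declaration
// such as 'int a[n][m]', the size values for 'n' and 'm' were previously
// recorded in VLASizeMap by EmitVariablyModifiedType, so getVLASize returns
// the pair (n * m as a size_t value, int).  Roughly:
//
//   llvm::Value *count;
//   QualType elemTy;
//   std::tie(count, elemTy) = CGF.getVLASize(vlaType);  // count = n*m
//
// The product is built with CreateNUWMul because a wrapping element count is
// already undefined behavior at the source level.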

void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
  assert(type->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVLASizes!");

  EnsureInsertPoint();

  // We're going to walk down into the type and look for VLA
  // expressions.
  do {
    assert(type->isVariablyModifiedType());

    const Type *ty = type.getTypePtr();
    switch (ty->getTypeClass()) {

#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("unexpected dependent type!");

    // These types are never variably-modified.
    case Type::Builtin:
    case Type::Complex:
    case Type::Vector:
    case Type::ExtVector:
    case Type::Record:
    case Type::Enum:
    case Type::Elaborated:
    case Type::TemplateSpecialization:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ObjCObjectPointer:
      llvm_unreachable("type class is never variably-modified!");

    case Type::Adjusted:
      type = cast<AdjustedType>(ty)->getAdjustedType();
      break;

    case Type::Decayed:
      type = cast<DecayedType>(ty)->getPointeeType();
      break;

    case Type::Pointer:
      type = cast<PointerType>(ty)->getPointeeType();
      break;

    case Type::BlockPointer:
      type = cast<BlockPointerType>(ty)->getPointeeType();
      break;

    case Type::LValueReference:
    case Type::RValueReference:
      type = cast<ReferenceType>(ty)->getPointeeType();
      break;

    case Type::MemberPointer:
      type = cast<MemberPointerType>(ty)->getPointeeType();
      break;

    case Type::ConstantArray:
    case Type::IncompleteArray:
      // Losing element qualification here is fine.
      type = cast<ArrayType>(ty)->getElementType();
      break;

    case Type::VariableArray: {
      // Losing element qualification here is fine.
      const VariableArrayType *vat = cast<VariableArrayType>(ty);

      // Unknown size indication requires no size computation.
      // Otherwise, evaluate and record it.
      if (const Expr *size = vat->getSizeExpr()) {
        // It's possible that we might have emitted this already,
        // e.g. with a typedef and a pointer to it.
        llvm::Value *&entry = VLASizeMap[size];
        if (!entry) {
          llvm::Value *Size = EmitScalarExpr(size);

          // C11 6.7.6.2p5:
          //   If the size is an expression that is not an integer constant
          //   expression [...] each time it is evaluated it shall have a value
          //   greater than zero.
          if (SanOpts->VLABound &&
              size->getType()->isSignedIntegerType()) {
            llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
            llvm::Constant *StaticArgs[] = {
              EmitCheckSourceLocation(size->getLocStart()),
              EmitCheckTypeDescriptor(size->getType())
            };
            EmitCheck(Builder.CreateICmpSGT(Size, Zero),
                      "vla_bound_not_positive", StaticArgs, Size,
                      CRK_Recoverable);
          }

          // Always zexting here would be wrong if it weren't
          // undefined behavior to have a negative bound.
          entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
        }
      }
      type = vat->getElementType();
      break;
    }

    case Type::FunctionProto:
    case Type::FunctionNoProto:
      type = cast<FunctionType>(ty)->getReturnType();
      break;

    case Type::Paren:
    case Type::TypeOf:
    case Type::UnaryTransform:
    case Type::Attributed:
    case Type::SubstTemplateTypeParm:
    case Type::PackExpansion:
      // Keep walking after single level desugaring.
      type = type.getSingleStepDesugaredType(getContext());
      break;

    case Type::Typedef:
    case Type::Decltype:
    case Type::Auto:
      // Stop walking: nothing to do.
      return;

    case Type::TypeOfExpr:
      // Stop walking: emit typeof expression.
      EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
      return;

    case Type::Atomic:
      type = cast<AtomicType>(ty)->getValueType();
      break;
    }
  } while (type->isVariablyModifiedType());
}
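// A brief usage sketch (illustrative, not from the original source): for a
// declaration such as
//
//   void f(int n) {
//     int (*p)[n];   // variably modified, though 'p' itself is just a pointer
//   }
//
// EmitVariablyModifiedType on p's type walks Pointer -> VariableArray, emits
// the scalar expression 'n' exactly once, and caches the zero-extended size_t
// value in VLASizeMap keyed by the size expression, so later queries via
// getVLASize reuse that same value.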

llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
  if (getContext().getBuiltinVaListType()->isArrayType())
    return EmitScalarExpr(E);
  return EmitLValue(E).getAddress();
}
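// Note (illustrative assumption about target ABIs, not stated in this file):
// on targets where __builtin_va_list is an array type, e.g.
//
//   va_list ap;            // 'ap' has a type like __va_list_tag[1]
//
// a va_list expression undergoes array-to-pointer decay, so EmitScalarExpr
// already yields the address we want; on targets where va_list is a scalar
// or struct type, we instead take the address of the lvalue.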

void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
                                              llvm::Constant *Init) {
  assert(Init && "Invalid DeclRefExpr initializer!");
  if (CGDebugInfo *Dbg = getDebugInfo())
    if (CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo)
      Dbg->EmitGlobalVariable(E->getDecl(), Init);
}

CodeGenFunction::PeepholeProtection
CodeGenFunction::protectFromPeepholes(RValue rvalue) {
  // At the moment, the only aggressive peephole we do in IR gen
  // is trunc(zext) folding, but if we add more, we can easily
  // extend this protection.

  if (!rvalue.isScalar()) return PeepholeProtection();
  llvm::Value *value = rvalue.getScalarVal();
  if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();

  // Just make an extra bitcast.
  assert(HaveInsertPoint());
  llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
                                                  Builder.GetInsertBlock());

  PeepholeProtection protection;
  protection.Inst = inst;
  return protection;
}
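// Rough illustration of the trunc(zext) fold mentioned above (sketch IR, not
// from the original source):
//
//   %z = zext i16 %v to i32
//   %t = trunc i32 %z to i16   ; the peephole would fold this back to %v
//
// protectFromPeepholes pins %z with a throw-away 'bitcast i32 %z to i32';
// unprotectFromPeepholes erases that bitcast once the protected rvalue has
// been consumed.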

void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
  if (!protection.Inst) return;

  // In theory, we could try to duplicate the peepholes now, but it's not
  // worth the complexity.
  protection.Inst->eraseFromParent();
}

llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Value *AnnotationFn,
                                                 llvm::Value *AnnotatedVal,
                                                 StringRef AnnotationStr,
                                                 SourceLocation Location) {
  llvm::Value *Args[4] = {
    AnnotatedVal,
    Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
    Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
    CGM.EmitAnnotationLineNo(Location)
  };
  return Builder.CreateCall(AnnotationFn, Args);
}
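// The emitted IR looks roughly like the following (sketch; exact globals and
// types depend on the target and the intrinsic passed in):
//
//   call void @llvm.var.annotation(i8* %val.cast, i8* %annotation.str,
//                                  i8* %translation.unit.str, i32 <line>)
//
// where the last three operands come from EmitAnnotationString,
// EmitAnnotationUnit, and EmitAnnotationLineNo respectively.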

void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  // FIXME We create a new bitcast for every annotation because that's what
  // llvm-gcc was doing.
  for (const auto *I : D->specific_attrs<AnnotateAttr>())
    EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
                       Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
                       I->getAnnotation(), D->getLocation());
}
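// Source-level trigger (illustrative): a declaration such as
//
//   __attribute__((annotate("my_tag"))) int x;
//
// reaches this path and produces one llvm.var.annotation call per annotate
// attribute on the variable.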

llvm::Value *CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
                                                   llvm::Value *V) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  llvm::Type *VTy = V->getType();
  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
                                    CGM.Int8PtrTy);

  for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
    // FIXME Always emit the cast inst so we can differentiate between
    // annotation on the first field of a struct and annotation on the struct
    // itself.
    if (VTy != CGM.Int8PtrTy)
      V = Builder.Insert(new llvm::BitCastInst(V, CGM.Int8PtrTy));
    V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation());
    V = Builder.CreateBitCast(V, VTy);
  }

  return V;
}
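// Unlike var annotations, field annotations use llvm.ptr.annotation, which
// returns the annotated pointer, so the loop threads V through each call and
// casts it back to its original type.  Illustrative source trigger:
//
//   struct S { __attribute__((annotate("field_tag"))) int f; };
//
// Forming the address of s.f goes through this path once per annotate
// attribute on the field.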

CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }

void CodeGenFunction::InsertHelper(llvm::Instruction *I,
                                   const llvm::Twine &Name,
                                   llvm::BasicBlock *BB,
                                   llvm::BasicBlock::iterator InsertPt) const {
  LoopStack.InsertHelper(I);
}

template <bool PreserveNames>
void CGBuilderInserter<PreserveNames>::InsertHelper(
    llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
    llvm::BasicBlock::iterator InsertPt) const {
  llvm::IRBuilderDefaultInserter<PreserveNames>::InsertHelper(I, Name, BB,
                                                              InsertPt);
  if (CGF)
    CGF->InsertHelper(I, Name, BB, InsertPt);
}

#ifdef NDEBUG
#define PreserveNames false
#else
#define PreserveNames true
#endif
template void CGBuilderInserter<PreserveNames>::InsertHelper(
    llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
    llvm::BasicBlock::iterator InsertPt) const;
#undef PreserveNames
