//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Operator.h"
using namespace clang;
using namespace CodeGen;

CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
    : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
      Builder(cgm.getModule().getContext(), llvm::ConstantFolder(),
              CGBuilderInserterTy(this)),
      CurFn(nullptr), CapturedStmtInfo(nullptr),
      SanOpts(CGM.getLangOpts().Sanitize), IsSanitizerScope(false),
      CurFuncIsThunk(false), AutoreleaseResult(false), SawAsmBlock(false),
      IsOutlinedSEHHelper(false), BlockInfo(nullptr), BlockPointer(nullptr),
      LambdaThisCaptureField(nullptr), NormalCleanupDest(nullptr),
      NextCleanupDestIndex(1), FirstBlockInfo(nullptr), EHResumeBlock(nullptr),
      ExceptionSlot(nullptr), EHSelectorSlot(nullptr),
      AbnormalTerminationSlot(nullptr), SEHPointersDecl(nullptr),
      DebugInfo(CGM.getModuleDebugInfo()), DisableDebugInfo(false),
      DidCallStackSave(false), IndirectBranch(nullptr), PGO(cgm),
      SwitchInsn(nullptr), SwitchWeights(nullptr), CaseRangeBlock(nullptr),
      UnreachableBlock(nullptr), NumReturnExprs(0), NumSimpleReturnExprs(0),
      CXXABIThisDecl(nullptr), CXXABIThisValue(nullptr), CXXThisValue(nullptr),
      CXXDefaultInitExprThis(nullptr), CXXStructorImplicitParamDecl(nullptr),
      CXXStructorImplicitParamValue(nullptr), OutermostConditional(nullptr),
      CurLexicalScope(nullptr), TerminateLandingPad(nullptr),
      TerminateHandler(nullptr), TrapBB(nullptr) {
  if (!suppressNewContext)
    CGM.getCXXABI().getMangleContext().startNewFunction();

  llvm::FastMathFlags FMF;
  if (CGM.getLangOpts().FastMath)
    FMF.setUnsafeAlgebra();
  if (CGM.getLangOpts().FiniteMathOnly) {
    FMF.setNoNaNs();
    FMF.setNoInfs();
  }
  if (CGM.getCodeGenOpts().NoNaNsFPMath)
    FMF.setNoNaNs();
  if (CGM.getCodeGenOpts().NoSignedZeros)
    FMF.setNoSignedZeros();
  if (CGM.getCodeGenOpts().ReciprocalMath)
    FMF.setAllowReciprocal();
  Builder.SetFastMathFlags(FMF);
}
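
// Illustrative note (not from the original source): the flags collected
// above become per-instruction fast-math flags in the emitted IR. For
// example, under -ffast-math a float multiply is emitted roughly as
//
//   %mul = fmul fast float %a, %b
//
// while -fno-signed-zeros alone would yield just the 'nsz' flag.
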
CodeGenFunction::~CodeGenFunction() {
  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");

  // If there are any unclaimed block infos, go ahead and destroy them
  // now. This can happen if IR-gen gets clever and skips evaluating
  // something.
  if (FirstBlockInfo)
    destroyBlockInfos(FirstBlockInfo);

  if (getLangOpts().OpenMP)
    CGM.getOpenMPRuntime().functionFinished(*this);
}

LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
  CharUnits Alignment;
  if (CGM.getCXXABI().isTypeInfoCalculable(T)) {
    Alignment = getContext().getTypeAlignInChars(T);
    unsigned MaxAlign = getContext().getLangOpts().MaxTypeAlign;
    if (MaxAlign && Alignment.getQuantity() > MaxAlign &&
        !getContext().isAlignmentRequired(T))
      Alignment = CharUnits::fromQuantity(MaxAlign);
  }
  return LValue::MakeAddr(V, T, Alignment, getContext(), CGM.getTBAAInfo(T));
}

llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
      llvm_unreachable("undeduced auto type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
      return TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
      return TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}
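
// Illustrative examples (not from the original source): getEvaluationKind
// classifies 'int', 'T*', and enum types as TEK_Scalar, '_Complex double'
// as TEK_Complex, and arrays and class/struct types as TEK_Aggregate;
// '_Atomic(int)' is classified by its underlying 'int', i.e. TEK_Scalar.
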
llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return llvm::DebugLoc();
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
        dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Record/return the DebugLoc of the simple 'return' expression to be
      // used later by the actual 'ret' instruction.
      llvm::DebugLoc Loc = BI->getDebugLoc();
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      return Loc;
    }
  }

  // FIXME: We are at an unreachable point, there is no reason to emit the block
  // unless it has uses. However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock.getBlock());
  return llvm::DebugLoc();
}

static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty())
    return CGF.CurFn->getBasicBlockList().push_back(BB);
  delete BB;
}

void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
                               && NumSimpleReturnExprs == NumReturnExprs
                               && ReturnBlock.getBlock()->use_empty();
  // Usually the return expression is evaluated before the cleanup
  // code. If the function contains only a simple return statement,
  // such as a constant, the location before the cleanup code becomes
  // the last useful breakpoint in the function, because the simple
  // return expression will be evaluated after the cleanup code. To be
  // safe, set the debug location for cleanup code to the location of
  // the return statement. Otherwise the cleanup code should be at the
  // end of the function's lexical scope.
  //
  // If there are multiple branches to the return block, the branch
  // instructions will get the location of the return statements and
  // all will be fine.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (OnlySimpleReturnStmts)
      DI->EmitLocation(Builder, LastStopPoint);
    else
      DI->EmitLocation(Builder, EndLoc);
  }

  // Pop any cleanups that might have been associated with the
  // parameters. Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  bool EmitRetDbgLoc = true;
  if (EHStack.stable_begin() != PrologueCleanupDepth) {
    // Make sure the line table doesn't jump back into the body for
    // the ret after it's been at EndLoc.
    EmitRetDbgLoc = false;

    if (CGDebugInfo *DI = getDebugInfo())
      if (OnlySimpleReturnStmts)
        DI->EmitLocation(Builder, EndLoc);

    PopCleanupBlocks(PrologueCleanupDepth);
  }

  // Emit function epilog (to return).
  llvm::DebugLoc Loc = EmitReturnBlock();

  if (ShouldInstrumentFunction())
    EmitFunctionInstrumentation("__cyg_profile_func_exit");

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitFunctionEnd(Builder);

  // Reset the debug location to that of the simple 'return' expression, if
  // any, rather than that of the end of the function's scope '}'.
  ApplyDebugLocation AL(*this, Loc);
  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end of
  // the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // If some of our locals escaped, insert a call to llvm.frameescape in the
  // entry block.
  if (!EscapedLocals.empty()) {
    // Invert the map from local to index into a simple vector. There should be
    // no holes.
    SmallVector<llvm::Value *, 4> EscapeArgs;
    EscapeArgs.resize(EscapedLocals.size());
    for (auto &Pair : EscapedLocals)
      EscapeArgs[Pair.second] = Pair.first;
    llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
        &CGM.getModule(), llvm::Intrinsic::frameescape);
    CGBuilderTy(AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = nullptr;
  Ptr->eraseFromParent();

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero entry PHI node, which is illegal, zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, EHResumeBlock);
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();

  for (SmallVectorImpl<std::pair<llvm::Instruction *, llvm::Value *> >::iterator
           I = DeferredReplacements.begin(),
           E = DeferredReplacements.end();
       I != E; ++I) {
    I->first->replaceAllUsesWith(I->second);
    I->first->eraseFromParent();
  }
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls.
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions)
    return false;
  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}
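
// Illustrative note (not from the original source): with
// -finstrument-functions, users typically supply the two hooks emitted by
// EmitFunctionInstrumentation below themselves, e.g.
//
//   void __cyg_profile_func_enter(void *this_fn, void *call_site);
//   void __cyg_profile_func_exit(void *this_fn, void *call_site);
//
// and mark those definitions __attribute__((no_instrument_function)) so
// the hooks are not instrumented recursively.
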
/// EmitFunctionInstrumentation - Emit LLVM code to call the specified
/// instrumentation function with the current function and the call site, if
/// function instrumentation is enabled.
void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
  // void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
  llvm::PointerType *PointerTy = Int8PtrTy;
  llvm::Type *ProfileFuncArgs[] = { PointerTy, PointerTy };
  llvm::FunctionType *FunctionTy =
      llvm::FunctionType::get(VoidTy, ProfileFuncArgs, false);

  llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
  llvm::CallInst *CallSite = Builder.CreateCall(
      CGM.getIntrinsic(llvm::Intrinsic::returnaddress),
      llvm::ConstantInt::get(Int32Ty, 0),
      "callsite");

  llvm::Value *args[] = {
    llvm::ConstantExpr::getBitCast(CurFn, PointerTy),
    CallSite
  };

  EmitNounwindRuntimeCall(F, args);
}

void CodeGenFunction::EmitMCountInstrumentation() {
  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);

  llvm::Constant *MCountFn =
      CGM.CreateRuntimeFunction(FTy, getTarget().getMCountName());
  EmitNounwindRuntimeCall(MCountFn);
}

// OpenCL v1.2 s5.6.4.6 allows the compiler to store kernel argument
// information in the program executable. The argument information stored
// includes the argument name, its type, and the address and access
// qualifiers used.
static void GenOpenCLArgMetadata(const FunctionDecl *FD, llvm::Function *Fn,
                                 CodeGenModule &CGM, llvm::LLVMContext &Context,
                                 SmallVector<llvm::Metadata *, 5> &kernelMDArgs,
                                 CGBuilderTy &Builder, ASTContext &ASTCtx) {
  // Create MDNodes that represent the kernel arg metadata.
  // Each MDNode is a list of the form "key", followed by one value per
  // kernel argument.

  const PrintingPolicy &Policy = ASTCtx.getPrintingPolicy();

  // MDNode for the kernel argument address space qualifiers.
  SmallVector<llvm::Metadata *, 8> addressQuals;
  addressQuals.push_back(llvm::MDString::get(Context, "kernel_arg_addr_space"));

  // MDNode for the kernel argument access qualifiers (images only).
  SmallVector<llvm::Metadata *, 8> accessQuals;
  accessQuals.push_back(llvm::MDString::get(Context, "kernel_arg_access_qual"));

  // MDNode for the kernel argument type names.
  SmallVector<llvm::Metadata *, 8> argTypeNames;
  argTypeNames.push_back(llvm::MDString::get(Context, "kernel_arg_type"));

  // MDNode for the kernel argument base type names.
  SmallVector<llvm::Metadata *, 8> argBaseTypeNames;
  argBaseTypeNames.push_back(
      llvm::MDString::get(Context, "kernel_arg_base_type"));

  // MDNode for the kernel argument type qualifiers.
  SmallVector<llvm::Metadata *, 8> argTypeQuals;
  argTypeQuals.push_back(llvm::MDString::get(Context, "kernel_arg_type_qual"));

  // MDNode for the kernel argument names.
  SmallVector<llvm::Metadata *, 8> argNames;
  argNames.push_back(llvm::MDString::get(Context, "kernel_arg_name"));

  for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
    const ParmVarDecl *parm = FD->getParamDecl(i);
    QualType ty = parm->getType();
    std::string typeQuals;

    if (ty->isPointerType()) {
      QualType pointeeTy = ty->getPointeeType();

      // Get address qualifier.
      addressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(
          ASTCtx.getTargetAddressSpace(pointeeTy.getAddressSpace()))));

      // Get argument type name.
      std::string typeName =
          pointeeTy.getUnqualifiedType().getAsString(Policy) + "*";

      // Turn "unsigned type" into "utype" (e.g. "unsigned int" becomes "uint").
      std::string::size_type pos = typeName.find("unsigned");
      if (pointeeTy.isCanonical() && pos != std::string::npos)
        typeName.erase(pos+1, 8);

      argTypeNames.push_back(llvm::MDString::get(Context, typeName));

      std::string baseTypeName =
          pointeeTy.getUnqualifiedType().getCanonicalType().getAsString(
              Policy) +
          "*";

      // Turn "unsigned type" to "utype"
      pos = baseTypeName.find("unsigned");
      if (pos != std::string::npos)
        baseTypeName.erase(pos+1, 8);

      argBaseTypeNames.push_back(llvm::MDString::get(Context, baseTypeName));

      // Get argument type qualifiers:
      if (ty.isRestrictQualified())
        typeQuals = "restrict";
      if (pointeeTy.isConstQualified() ||
          (pointeeTy.getAddressSpace() == LangAS::opencl_constant))
        typeQuals += typeQuals.empty() ? "const" : " const";
      if (pointeeTy.isVolatileQualified())
        typeQuals += typeQuals.empty() ? "volatile" : " volatile";
    } else {
      uint32_t AddrSpc = 0;
      if (ty->isImageType())
        AddrSpc =
            CGM.getContext().getTargetAddressSpace(LangAS::opencl_global);

      addressQuals.push_back(
          llvm::ConstantAsMetadata::get(Builder.getInt32(AddrSpc)));

      // Get argument type name.
      std::string typeName = ty.getUnqualifiedType().getAsString(Policy);

      // Turn "unsigned type" to "utype"
      std::string::size_type pos = typeName.find("unsigned");
      if (ty.isCanonical() && pos != std::string::npos)
        typeName.erase(pos+1, 8);

      argTypeNames.push_back(llvm::MDString::get(Context, typeName));

      std::string baseTypeName =
          ty.getUnqualifiedType().getCanonicalType().getAsString(Policy);

      // Turn "unsigned type" to "utype"
      pos = baseTypeName.find("unsigned");
      if (pos != std::string::npos)
        baseTypeName.erase(pos+1, 8);

      argBaseTypeNames.push_back(llvm::MDString::get(Context, baseTypeName));

      // Get argument type qualifiers:
      if (ty.isConstQualified())
        typeQuals = "const";
      if (ty.isVolatileQualified())
        typeQuals += typeQuals.empty() ? "volatile" : " volatile";
    }

    argTypeQuals.push_back(llvm::MDString::get(Context, typeQuals));

    // Get image access qualifier:
    if (ty->isImageType()) {
      const OpenCLImageAccessAttr *A = parm->getAttr<OpenCLImageAccessAttr>();
      if (A && A->isWriteOnly())
        accessQuals.push_back(llvm::MDString::get(Context, "write_only"));
      else
        accessQuals.push_back(llvm::MDString::get(Context, "read_only"));
      // FIXME: what about read_write?
    } else
      accessQuals.push_back(llvm::MDString::get(Context, "none"));

    // Get argument name.
    argNames.push_back(llvm::MDString::get(Context, parm->getName()));
  }

  kernelMDArgs.push_back(llvm::MDNode::get(Context, addressQuals));
  kernelMDArgs.push_back(llvm::MDNode::get(Context, accessQuals));
  kernelMDArgs.push_back(llvm::MDNode::get(Context, argTypeNames));
  kernelMDArgs.push_back(llvm::MDNode::get(Context, argBaseTypeNames));
  kernelMDArgs.push_back(llvm::MDNode::get(Context, argTypeQuals));
  if (CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
    kernelMDArgs.push_back(llvm::MDNode::get(Context, argNames));
}

void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
                                               llvm::Function *Fn)
{
  if (!FD->hasAttr<OpenCLKernelAttr>())
    return;

  llvm::LLVMContext &Context = getLLVMContext();

  SmallVector<llvm::Metadata *, 5> kernelMDArgs;
  kernelMDArgs.push_back(llvm::ConstantAsMetadata::get(Fn));

  GenOpenCLArgMetadata(FD, Fn, CGM, Context, kernelMDArgs, Builder,
                       getContext());

  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
    QualType hintQTy = A->getTypeHint();
    const ExtVectorType *hintEltQTy = hintQTy->getAs<ExtVectorType>();
    bool isSignedInteger =
        hintQTy->isSignedIntegerType() ||
        (hintEltQTy && hintEltQTy->getElementType()->isSignedIntegerType());
    llvm::Metadata *attrMDArgs[] = {
        llvm::MDString::get(Context, "vec_type_hint"),
        llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
            CGM.getTypes().ConvertType(A->getTypeHint()))),
        llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
            llvm::IntegerType::get(Context, 32),
            llvm::APInt(32, (uint64_t)(isSignedInteger ? 1 : 0))))};
    kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
  }

  if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
    llvm::Metadata *attrMDArgs[] = {
        llvm::MDString::get(Context, "work_group_size_hint"),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
  }

  if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
    llvm::Metadata *attrMDArgs[] = {
        llvm::MDString::get(Context, "reqd_work_group_size"),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
  }

  llvm::MDNode *kernelMDNode = llvm::MDNode::get(Context, kernelMDArgs);
  llvm::NamedMDNode *OpenCLKernelMetadata =
      CGM.getModule().getOrInsertNamedMetadata("opencl.kernels");
  OpenCLKernelMetadata->addOperand(kernelMDNode);
}
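
// Illustrative sketch (not from the original source): for a kernel such as
//
//   kernel void foo(global unsigned int *in, int n) { ... }
//
// the code above produces roughly the following module-level metadata
// (exact spelling depends on the target's address-space mapping):
//
//   !opencl.kernels = !{!0}
//   !0 = !{void (...)* @foo, !1, !2, !3, !4, !5}
//   !1 = !{!"kernel_arg_addr_space", i32 1, i32 0}
//   !2 = !{!"kernel_arg_access_qual", !"none", !"none"}
//   !3 = !{!"kernel_arg_type", !"uint*", !"int"}
//   !4 = !{!"kernel_arg_base_type", !"uint*", !"int"}
//   !5 = !{!"kernel_arg_type_qual", !"", !""}
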
/// Determine whether the function F ends with a return stmt.
static bool endsWithReturn(const Decl* F) {
  const Stmt *Body = nullptr;
  if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
    Body = FD->getBody();
  else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
    Body = OMD->getBody();

  if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
    auto LastStmt = CS->body_rbegin();
    if (LastStmt != CS->body_rend())
      return isa<ReturnStmt>(*LastStmt);
  }
  return false;
}

void CodeGenFunction::StartFunction(GlobalDecl GD,
                                    QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation Loc,
                                    SourceLocation StartLoc) {
  assert(!CurFn &&
         "Do not use a CodeGenFunction object for more than one function");

  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = D;
  CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  if (CGM.isInSanitizerBlacklist(Fn, Loc))
    SanOpts.clear();

  // Pass the inline keyword to the optimizer if it appears explicitly on any
  // declaration. Also, in the case of -fno-inline, attach the NoInline
  // attribute to all functions that are not marked AlwaysInline.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    if (!CGM.getCodeGenOpts().NoInline) {
      for (auto RI : FD->redecls())
        if (RI->isInlineSpecified()) {
          Fn->addFnAttr(llvm::Attribute::InlineHint);
          break;
        }
    } else if (!FD->hasAttr<AlwaysInlineAttr>())
      Fn->addFnAttr(llvm::Attribute::NoInline);
  }

  if (getLangOpts().OpenCL) {
    // Add metadata for a kernel function.
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      EmitOpenCLKernelMetadata(FD, Fn);
  }

  // If we are checking function types, emit a function type signature as
  // prologue data.
  if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) {
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      if (llvm::Constant *PrologueSig =
              CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
        llvm::Constant *FTRTTIConst =
            CGM.GetAddrOfRTTIDescriptor(FD->getType(), /*ForEH=*/true);
        llvm::Constant *PrologueStructElems[] = { PrologueSig, FTRTTIConst };
        llvm::Constant *PrologueStructConst =
            llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true);
        Fn->setPrologueData(PrologueStructConst);
      }
    }
  }

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entry block
  // later. Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
  if (Builder.isNamePreserving())
    AllocaInsertPt->setName("allocapt");

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    SmallVector<QualType, 16> ArgTypes;
    for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
         i != e; ++i) {
      ArgTypes.push_back((*i)->getType());
    }

    QualType FnType =
        getContext().getFunctionType(RetTy, ArgTypes,
                                     FunctionProtoType::ExtProtoInfo());
    DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, Builder);
  }

  if (ShouldInstrumentFunction())
    EmitFunctionInstrumentation("__cyg_profile_func_enter");

  if (CGM.getCodeGenOpts().InstrumentForProfiling)
    EmitMCountInstrumentation();

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = nullptr;

    // Count the implicit return.
    if (!endsWithReturn(D))
      ++NumReturnExprs;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Indirect aggregate return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    auto AI = CurFn->arg_begin();
    if (CurFnInfo->getReturnInfo().isSRetAfterThis())
      ++AI;
    ReturnValue = AI;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Load the sret pointer from the argument struct and return into that.
    unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
    llvm::Function::arg_iterator EI = CurFn->arg_end();
    --EI;
    llvm::Value *Addr = Builder.CreateStructGEP(nullptr, EI, Idx);
    ReturnValue = Builder.CreateLoad(Addr, "agg.result");
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");

    // Tell the epilog emitter to autorelease the result. We do this
    // now so that various specialized functions can suppress it
    // during their IR-generation.
    if (getLangOpts().ObjCAutoRefCount &&
        !CurFnInfo->isReturnsRetained() &&
        RetTy->isObjCRetainableType())
      AutoreleaseResult = true;
  }

  EmitStartEHSpec(CurCodeDecl);

  PrologueCleanupDepth = EHStack.stable_begin();
  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
    CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
    const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
    if (MD->getParent()->isLambda() &&
        MD->getOverloadedOperator() == OO_Call) {
      // We're in a lambda; figure out the captures.
      MD->getParent()->getCaptureFields(LambdaCaptureFields,
                                        LambdaThisCaptureField);
      if (LambdaThisCaptureField) {
        // If this lambda captures this, load it.
        LValue ThisLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
        CXXThisValue = EmitLoadOfLValue(ThisLValue,
                                        SourceLocation()).getScalarVal();
      }
      for (auto *FD : MD->getParent()->fields()) {
        if (FD->hasCapturedVLAType()) {
          auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
                                           SourceLocation()).getScalarVal();
          auto VAT = FD->getCapturedVLAType();
          VLASizeMap[VAT->getSizeExpr()] = ExprArg;
        }
      }
    } else {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'? The
      // fast register allocator would be happier...
      CXXThisValue = CXXABIThisValue;
    }
  }

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
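  // For example (illustrative, not from the original source), for
  //   void f(unsigned n, int arr[n][n]);
  // the size expression 'n' has to be evaluated and cached here so that
  // later sizeof() and indexing computations on 'arr' can use it.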
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    const VarDecl *VD = *i;

    // Dig out the type as written from ParmVarDecls; it's unclear whether
    // the standard (C99 6.9.1p10) requires this, but we're following the
    // precedent set by gcc.
    QualType Ty;
    if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
      Ty = PVD->getOriginalType();
    else
      Ty = VD->getType();

    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
  }
  // Emit a location at the end of the prologue.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, StartLoc);
}

void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args,
                                       const Stmt *Body) {
  RegionCounter Cnt = getPGORegionCounter(Body);
  Cnt.beginRegion(Builder);
  if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
    EmitCompoundStmtWithoutScope(*S);
  else
    EmitStmt(Body);
}

/// When instrumenting to collect profile data, the counts for some blocks
/// such as switch cases need to not include the fall-through counts, so
/// emit a branch around the instrumentation code. When not instrumenting,
/// this just calls EmitBlock().
void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
                                               RegionCounter &Cnt) {
  llvm::BasicBlock *SkipCountBB = nullptr;
  if (HaveInsertPoint() && CGM.getCodeGenOpts().ProfileInstrGenerate) {
    // When instrumenting for profiling, the fallthrough to certain
    // statements needs to skip over the instrumentation code so that we
    // get an accurate count.
    SkipCountBB = createBasicBlock("skipcount");
    EmitBranch(SkipCountBB);
  }
  EmitBlock(BB);
  Cnt.beginRegion(Builder, /*AddIncomingFallThrough=*/true);
  if (SkipCountBB)
    EmitBlock(SkipCountBB);
}

/// Tries to mark the given function nounwind based on the
/// non-existence of any throwing calls within it. We believe this is
/// lightweight enough to do at -O0.
static void TryMarkNoThrow(llvm::Function *F) {
  // LLVM treats 'nounwind' on a function as part of the type, so we
  // can't do this on functions that can be overwritten.
  if (F->mayBeOverridden()) return;

  for (llvm::Function::iterator FI = F->begin(), FE = F->end(); FI != FE; ++FI)
    for (llvm::BasicBlock::iterator
           BI = FI->begin(), BE = FI->end(); BI != BE; ++BI)
      if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(&*BI)) {
        if (!Call->doesNotThrow())
          return;
      } else if (isa<llvm::ResumeInst>(&*BI)) {
        return;
      }
  F->setDoesNotThrow();
}
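
// Illustrative note (not from the original source): a function marked this
// way carries the 'nounwind' attribute in the emitted IR, e.g.
//
//   define void @f() #0 { ... }
//   attributes #0 = { nounwind }
//
// which lets callers treat calls to it as unable to unwind (a plain 'call'
// rather than an 'invoke' with an EH edge).
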
void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
                                   const CGFunctionInfo &FnInfo) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  // Check if we should generate debug info for this function.
  if (FD->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  FunctionArgList Args;
  QualType ResTy = FD->getReturnType();

  CurGD = GD;
  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
  if (MD && MD->isInstance()) {
    if (CGM.getCXXABI().HasThisReturn(GD))
      ResTy = MD->getThisType(getContext());
    else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
      ResTy = CGM.getContext().VoidPtrTy;
    CGM.getCXXABI().buildThisParam(*this, Args);
  }

  Args.append(FD->param_begin(), FD->param_end());

  if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
    CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);

  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange();
  CurEHLocation = BodyRange.getEnd();

  // Use the location of the start of the function to determine where
  // the function definition is located. By default use the location
  // of the declaration as the location for the subprogram. A function
  // may lack a declaration in the source code if it is created by code
  // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
  SourceLocation Loc = FD->getLocation();

  // If this is a function specialization then use the pattern body
  // as the location for the function.
  if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
    if (SpecDecl->hasBody(SpecDecl))
      Loc = SpecDecl->getLocation();

  // Emit the standard function prologue.
  StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());

  // Generate the body of the function.
  PGO.checkGlobalDecl(GD);
  PGO.assignRegionCounters(GD.getDecl(), CurFn);
  if (isa<CXXDestructorDecl>(FD))
    EmitDestructorBody(Args);
  else if (isa<CXXConstructorDecl>(FD))
    EmitConstructorBody(Args);
  else if (getLangOpts().CUDA &&
           !getLangOpts().CUDAIsDevice &&
           FD->hasAttr<CUDAGlobalAttr>())
    CGM.getCUDARuntime().EmitDeviceStubBody(*this, Args);
  else if (isa<CXXConversionDecl>(FD) &&
           cast<CXXConversionDecl>(FD)->isLambdaToBlockPointerConversion()) {
    // The lambda conversion to block pointer is special; the semantics can't
    // be expressed in the AST, so IRGen needs to special-case it.
    EmitLambdaToBlockPointerBody(Args);
  } else if (isa<CXXMethodDecl>(FD) &&
             cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
    // The lambda static invoker function is special, because it forwards or
    // clones the body of the function call operator (but is actually static).
    EmitLambdaStaticInvokeFunction(cast<CXXMethodDecl>(FD));
  } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
             (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
              cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
    // Implicit copy-assignment gets the same special treatment as implicit
    // copy-constructors.
    emitImplicitAssignmentOperatorBody(Args);
  } else if (Stmt *Body = FD->getBody()) {
    EmitFunctionBody(Args, Body);
  } else
    llvm_unreachable("no definition for emitted function");

  // C++11 [stmt.return]p2:
  //   Flowing off the end of a function [...] results in undefined behavior
  //   in a value-returning function.
  // C11 6.9.1p12:
  //   If the '}' that terminates a function is reached, and the value of the
  //   function call is used by the caller, the behavior is undefined.
  if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
      !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
    if (SanOpts.has(SanitizerKind::Return)) {
      SanitizerScope SanScope(this);
      llvm::Value *IsFalse = Builder.getFalse();
      EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
                "missing_return", EmitCheckSourceLocation(FD->getLocation()),
                None);
    } else if (CGM.getCodeGenOpts().OptimizationLevel == 0)
      Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::trap));
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the standard function epilogue.
  FinishFunction(BodyRange.getEnd());

  // If we haven't marked the function nothrow through other means, do
  // a quick pass now to see if we can.
  if (!CurFn->doesNotThrow())
    TryMarkNoThrow(CurFn);
}

/// ContainsLabel - Return true if the statement contains a label in it. If
/// this statement is not executed normally, then not containing a label means
/// that we can just remove the code.
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
  // Null statement, not a label!
  if (!S) return false;

  // If this is a label, we have to emit the code, consider something like:
  // if (0) {  ...  foo:  bar(); }  goto foo;
  //
  // TODO: If anyone cared, we could track __label__'s, since we know that you
  // can't jump to one from outside their declared region.
  if (isa<LabelStmt>(S))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we
  // have to emit the code.
  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore cases below it.
  if (isa<SwitchStmt>(S))
    IgnoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  for (Stmt::const_child_range I = S->children(); I; ++I)
    if (ContainsLabel(*I, IgnoreCaseStmts))
      return true;

  return false;
}

/// containsBreak - Return true if the statement contains a break out of it.
/// If the statement (recursively) contains a switch or loop with a break
/// inside of it, this is fine.
bool CodeGenFunction::containsBreak(const Stmt *S) {
  // Null statement, no break to find.
  if (!S) return false;

  // If this is a switch or loop that defines its own break scope, then we can
  // include it and anything inside of it.
  if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
      isa<ForStmt>(S))
    return false;

  if (isa<BreakStmt>(S))
    return true;

  // Scan subexpressions for verboten breaks.
  for (Stmt::const_child_range I = S->children(); I; ++I)
    if (containsBreak(*I))
      return true;

  return false;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false. If it
/// constant folds return true and set the boolean result in ResultBool.
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
                                                   bool &ResultBool) {
  llvm::APSInt ResultInt;
  if (!ConstantFoldsToSimpleInteger(Cond, ResultInt))
    return false;

  ResultBool = ResultInt.getBoolValue();
  return true;
}
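
// Illustrative examples (not from the original source): a condition such as
// 'sizeof(long) == 8' folds to a simple integer, so the overloads above
// return true with the folded value; 'if (0) { foo: ; }' also folds, but
// the dead branch contains a label that might still be a goto target, so
// the fold is rejected.
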
/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false. If it
/// constant folds return true and set the folded value.
bool CodeGenFunction::
ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &ResultInt) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  llvm::APSInt Int;
  if (!Cond->EvaluateAsInt(Int, getContext()))
    return false;  // Not foldable, not integer or not fully evaluatable.

  if (CodeGenFunction::ContainsLabel(Cond))
    return false;  // Contains a label.

  ResultInt = Int;
  return true;
}

/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
/// statement) to the specified blocks. Depending on the form of the condition,
/// this may try to simplify the code generated for the branch.
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
                                           llvm::BasicBlock *TrueBlock,
                                           llvm::BasicBlock *FalseBlock,
                                           uint64_t TrueCount) {
  Cond = Cond->IgnoreParens();

  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {

    // Handle X && Y in a condition.
    if (CondBOp->getOpcode() == BO_LAnd) {
      RegionCounter Cnt = getPGORegionCounter(CondBOp);

      // If we have "1 && X", simplify the code. "0 && X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          ConstantBool) {
        // br(1 && X) -> br(X).
        Cnt.beginRegion(Builder);
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // If we have "X && 1", simplify the code to use an uncond branch.
      // "X && 0" would have been constant folded to 0.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          ConstantBool) {
        // br(X && 1) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // Emit the LHS as a conditional. If the LHS conditional is false, we
      // want to jump to the FalseBlock.
      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
      // The counter tells us how often we evaluate RHS, and all of TrueCount
      // can be propagated to that branch.
      uint64_t RHSCount = Cnt.getCount();

      ConditionalEvaluation eval(*this);
      {
        ApplyDebugLocation DL(*this, Cond);
        EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount);
        EmitBlock(LHSTrue);
      }

      // Any temporaries created here are conditional.
      Cnt.beginRegion(Builder);
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount);
      eval.end(*this);

      return;
    }

    if (CondBOp->getOpcode() == BO_LOr) {
      RegionCounter Cnt = getPGORegionCounter(CondBOp);

      // If we have "0 || X", simplify the code. "1 || X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          !ConstantBool) {
        // br(0 || X) -> br(X).
        Cnt.beginRegion(Builder);
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // If we have "X || 0", simplify the code to use an uncond branch.
      // "X || 1" would have been constant folded to 1.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          !ConstantBool) {
        // br(X || 0) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // Emit the LHS as a conditional. If the LHS conditional is true, we
      // want to jump to the TrueBlock.
      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
      // We have the count for entry to the RHS and for the whole expression
      // being true, so we can divvy up the true count between the short
      // circuit and the RHS.
      uint64_t LHSCount = Cnt.getParentCount() - Cnt.getCount();
      uint64_t RHSCount = TrueCount - LHSCount;

      ConditionalEvaluation eval(*this);
      {
        ApplyDebugLocation DL(*this, Cond);
        EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount);
        EmitBlock(LHSFalse);
      }

      // Any temporaries created here are conditional.
      Cnt.beginRegion(Builder);
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, RHSCount);

      eval.end(*this);

      return;
    }
  }

  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
    // br(!x, t, f) -> br(x, f, t)
    if (CondUOp->getOpcode() == UO_LNot) {
      // Negate the count.
      uint64_t FalseCount = PGO.getCurrentRegionCount() - TrueCount;
      // Negate the condition and swap the destination blocks.
      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
                                  FalseCount);
    }
  }

  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
    // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
    llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
    llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");

    RegionCounter Cnt = getPGORegionCounter(CondOp);
    ConditionalEvaluation cond(*this);
    EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock, Cnt.getCount());

    // When computing PGO branch weights, we only know the overall count for
    // the true block. This code is essentially doing tail duplication of the
    // naive code-gen, introducing new edges for which counts are not
    // available. Divide the counts proportionally between the LHS and RHS of
    // the conditional operator.
    uint64_t LHSScaledTrueCount = 0;
    if (TrueCount) {
      double LHSRatio = Cnt.getCount() / (double) Cnt.getParentCount();
      LHSScaledTrueCount = TrueCount * LHSRatio;
    }

    cond.begin(*this);
    EmitBlock(LHSBlock);
    Cnt.beginRegion(Builder);
    {
      ApplyDebugLocation DL(*this, Cond);
      EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
                           LHSScaledTrueCount);
    }
    cond.end(*this);

    cond.begin(*this);
    EmitBlock(RHSBlock);
    EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
                         TrueCount - LHSScaledTrueCount);
    cond.end(*this);

    return;
  }

  if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
    // Conditional operator handling can give us a throw expression as a
    // condition for a case like:
    //   br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f))
    // Fold this to:
    //   br(c, throw x, br(y, t, f))
    EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
    return;
  }

  // Create branch weights based on the number of times we get here and the
  // number of times the condition should be true.
  uint64_t CurrentCount = std::max(PGO.getCurrentRegionCount(), TrueCount);
  llvm::MDNode *Weights = PGO.createBranchWeights(TrueCount,
                                                  CurrentCount - TrueCount);

  // Emit the code with the fully general case.
  llvm::Value *CondV;
  {
    ApplyDebugLocation DL(*this, Cond);
    CondV = EvaluateExprAsBool(Cond);
  }
  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights);
}

/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
  CGM.ErrorUnsupported(S, Type);
}

/// emitNonZeroVLAInit - Emit the "zero" initialization of a
/// variable-length array whose elements have a non-zero bit-pattern.
///
/// \param baseType the inner-most element type of the array
/// \param src - a char* pointing to the bit-pattern for a single
/// base element of the array
/// \param sizeInChars - the total size of the VLA, in chars
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
                               llvm::Value *dest, llvm::Value *src,
                               llvm::Value *sizeInChars) {
  std::pair<CharUnits,CharUnits> baseSizeAndAlign
    = CGF.getContext().getTypeInfoInChars(baseType);

  CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *baseSizeInChars
    = llvm::ConstantInt::get(CGF.IntPtrTy, baseSizeAndAlign.first.getQuantity());

  llvm::Type *i8p = Builder.getInt8PtrTy();

  llvm::Value *begin = Builder.CreateBitCast(dest, i8p, "vla.begin");
  llvm::Value *end = Builder.CreateInBoundsGEP(dest, sizeInChars, "vla.end");

  llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
  llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");

  // Make a loop over the VLA. C99 guarantees that the VLA element
  // count must be nonzero.
  CGF.EmitBlock(loopBB);

  llvm::PHINode *cur = Builder.CreatePHI(i8p, 2, "vla.cur");
  cur->addIncoming(begin, originBB);

  // memcpy the individual element bit-pattern.
  Builder.CreateMemCpy(cur, src, baseSizeInChars,
                       baseSizeAndAlign.second.getQuantity(),
                       /*volatile*/ false);

  // Go to the next element.
  llvm::Value *next = Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(),
                                                         cur, 1, "vla.next");

  // Leave if that's the end of the VLA.
  llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
  Builder.CreateCondBr(done, contBB, loopBB);
  cur->addIncoming(next, loopBB);

  CGF.EmitBlock(contBB);
}
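
// Illustrative note (not from the original source): this loop exists
// because some "null" patterns are not all-zero bytes. For example, under
// the Itanium C++ ABI a null data member pointer is represented as -1, so
// zero-initializing a VLA such as
//
//   int S::*ptrs[n];   // 'S' is a hypothetical class
//
// requires splatting that nonzero per-element pattern rather than a memset.
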
void
CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
  // Ignore empty classes in C++.
  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
        return;
    }
  }

  // Cast the dest ptr to the appropriate i8 pointer type.
  unsigned DestAS =
      cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
  llvm::Type *BP = Builder.getInt8PtrTy(DestAS);
  if (DestPtr->getType() != BP)
    DestPtr = Builder.CreateBitCast(DestPtr, BP);

  // Get size and alignment info for this aggregate.
  std::pair<CharUnits, CharUnits> TypeInfo =
      getContext().getTypeInfoInChars(Ty);
  CharUnits Size = TypeInfo.first;
  CharUnits Align = TypeInfo.second;

  llvm::Value *SizeVal;
  const VariableArrayType *vla;

  // Don't bother emitting a zero-byte memset.
  if (Size.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (const VariableArrayType *vlaType =
            dyn_cast_or_null<VariableArrayType>(
                getContext().getAsArrayType(Ty))) {
      QualType eltType;
      llvm::Value *numElts;
      std::tie(numElts, eltType) = getVLASize(vlaType);

      SizeVal = numElts;
      CharUnits eltSize = getContext().getTypeSizeInChars(eltType);
      if (!eltSize.isOne())
        SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
      vla = vlaType;
    } else {
      return;
    }
  } else {
    SizeVal = CGM.getSize(Size);
    vla = nullptr;
  }

  // If the type contains a pointer to data member, we can't memset it to
  // zero. Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  if (!CGM.getTypes().isZeroInitializable(Ty)) {
    // For a VLA, emit a single element, then splat that over the VLA.
    if (vla) Ty = getContext().getBaseElementType(vla);

    llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);

    llvm::GlobalVariable *NullVariable =
        new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
                                 /*isConstant=*/true,
                                 llvm::GlobalVariable::PrivateLinkage,
                                 NullConstant, Twine());
    llvm::Value *SrcPtr =
        Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy());

    if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);

    // Get and call the appropriate llvm.memcpy overload.
    Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity(), false);
    return;
  }

  // Otherwise, just memset the whole thing to zero. This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal,
                       Align.getQuantity(), false);
}

llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
  // Make sure that there is a block for the indirect goto.
  if (!IndirectBranch)
    GetIndirectGotoBlock();

  llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();

  // Make sure the indirect branch includes all of the address-taken blocks.
  IndirectBranch->addDestination(BB);
  return llvm::BlockAddress::get(CurFn, BB);
}
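
// Illustrative note (not from the original source): these blocks back the
// GNU address-of-label extension, e.g.
//
//   void *target = &&done;   // handled by GetAddrOfLabel
//   goto *target;            // branches via the shared "indirectgoto" block
//   done: ;
//
// All computed gotos in a function funnel through the single indirect
// branch created below.
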
llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
  // If we already made the indirect branch for indirect goto, return its block.
  if (IndirectBranch) return IndirectBranch->getParent();

  CGBuilderTy TmpBuilder(createBasicBlock("indirectgoto"));

  // Create the PHI node that indirect gotos will add entries to.
  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
                                              "indirect.goto.dest");

  // Create the indirect branch instruction.
  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
  return IndirectBranch->getParent();
}

/// Computes the length of an array in elements, as well as the base
/// element type and a properly-typed first element pointer.
llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
                                              QualType &baseType,
                                              llvm::Value *&addr) {
  const ArrayType *arrayType = origArrayType;

  // If it's a VLA, we have to load the stored size. Note that
  // this is the size of the VLA in bytes, not its size in elements.
  llvm::Value *numVLAElements = nullptr;
  if (isa<VariableArrayType>(arrayType)) {
    numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).first;

    // Walk into all VLAs. This doesn't require changes to addr,
    // which has type T* where T is the first non-VLA element type.
    do {
      QualType elementType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(elementType);

      // If we only have VLA components, 'addr' requires no adjustment.
      if (!arrayType) {
        baseType = elementType;
        return numVLAElements;
      }
    } while (isa<VariableArrayType>(arrayType));

    // We get out here only if we find a constant array type
    // inside the VLA.
  }

  // We have some number of constant-length arrays, so addr should
  // have LLVM type [M x [N x [...]]]*. Build a GEP that walks
  // down to the first element of addr.
  SmallVector<llvm::Value*, 8> gepIndices;

  // GEP down to the array type.
  llvm::ConstantInt *zero = Builder.getInt32(0);
  gepIndices.push_back(zero);

  uint64_t countFromCLAs = 1;
  QualType eltType;

  llvm::ArrayType *llvmArrayType =
      dyn_cast<llvm::ArrayType>(
          cast<llvm::PointerType>(addr->getType())->getElementType());
  while (llvmArrayType) {
    assert(isa<ConstantArrayType>(arrayType));
    assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
             == llvmArrayType->getNumElements());

    gepIndices.push_back(zero);
    countFromCLAs *= llvmArrayType->getNumElements();
    eltType = arrayType->getElementType();

    llvmArrayType =
        dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
    arrayType = getContext().getAsArrayType(arrayType->getElementType());
    assert((!llvmArrayType || arrayType) &&
           "LLVM and Clang types are out-of-synch");
  }

  if (arrayType) {
    // From this point onwards, the Clang array type has been emitted
    // as some other type (probably a packed struct). Compute the array
    // size, and just emit the 'begin' expression as a bitcast.
    while (arrayType) {
      countFromCLAs *=
          cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
      eltType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(eltType);
    }

    unsigned AddressSpace = addr->getType()->getPointerAddressSpace();
    llvm::Type *BaseType = ConvertType(eltType)->getPointerTo(AddressSpace);
    addr = Builder.CreateBitCast(addr, BaseType, "array.begin");
  } else {
    // Create the actual GEP.
    addr = Builder.CreateInBoundsGEP(addr, gepIndices, "array.begin");
  }

  baseType = eltType;

  llvm::Value *numElements
    = llvm::ConstantInt::get(SizeTy, countFromCLAs);

  // If we had any VLA dimensions, factor them in.
  if (numVLAElements)
    numElements = Builder.CreateNUWMul(numVLAElements, numElements);

  return numElements;
}

std::pair<llvm::Value*, QualType>
CodeGenFunction::getVLASize(QualType type) {
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
  assert(vla && "type was not a variable array type!");
  return getVLASize(vla);
}

std::pair<llvm::Value*, QualType>
CodeGenFunction::getVLASize(const VariableArrayType *type) {
  // The number of elements so far; always size_t.
  llvm::Value *numElements = nullptr;

  QualType elementType;
  do {
    elementType = type->getElementType();
    llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
    assert(vlaSize && "no size for VLA!");
    assert(vlaSize->getType() == SizeTy);

    if (!numElements) {
      numElements = vlaSize;
    } else {
      // It's undefined behavior if this wraps around, so mark it that way.
      // FIXME: Teach -fsanitize=undefined to trap this.
      numElements = Builder.CreateNUWMul(numElements, vlaSize);
    }
  } while ((type = getContext().getAsVariableArrayType(elementType)));

  return std::pair<llvm::Value*, QualType>(numElements, elementType);
}

void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
  assert(type->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVLASizes!");

  EnsureInsertPoint();

  // We're going to walk down into the type and look for VLA
  // expressions.
  do {
    assert(type->isVariablyModifiedType());

    const Type *ty = type.getTypePtr();
    switch (ty->getTypeClass()) {

#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("unexpected dependent type!");

    // These types are never variably-modified.
    case Type::Builtin:
    case Type::Complex:
    case Type::Vector:
    case Type::ExtVector:
    case Type::Record:
    case Type::Enum:
    case Type::Elaborated:
    case Type::TemplateSpecialization:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ObjCObjectPointer:
      llvm_unreachable("type class is never variably-modified!");

    case Type::Adjusted:
      type = cast<AdjustedType>(ty)->getAdjustedType();
      break;

    case Type::Decayed:
      type = cast<DecayedType>(ty)->getPointeeType();
      break;

    case Type::Pointer:
      type = cast<PointerType>(ty)->getPointeeType();
      break;

    case Type::BlockPointer:
      type = cast<BlockPointerType>(ty)->getPointeeType();
      break;

    case Type::LValueReference:
    case Type::RValueReference:
      type = cast<ReferenceType>(ty)->getPointeeType();
      break;

    case Type::MemberPointer:
      type = cast<MemberPointerType>(ty)->getPointeeType();
      break;

    case Type::ConstantArray:
    case Type::IncompleteArray:
      // Losing element qualification here is fine.
      type = cast<ArrayType>(ty)->getElementType();
      break;

    case Type::VariableArray: {
      // Losing element qualification here is fine.
      const VariableArrayType *vat = cast<VariableArrayType>(ty);

      // An unknown size indication (a '[*]' bound) requires no size
      // computation. Otherwise, evaluate and record it.
      if (const Expr *size = vat->getSizeExpr()) {
        // It's possible that we might have emitted this already,
        // e.g. with a typedef and a pointer to it.
        llvm::Value *&entry = VLASizeMap[size];
        if (!entry) {
          llvm::Value *Size = EmitScalarExpr(size);

          // C11 6.7.6.2p5:
          //   If the size is an expression that is not an integer constant
          //   expression [...] each time it is evaluated it shall have a
          //   value greater than zero.
          if (SanOpts.has(SanitizerKind::VLABound) &&
              size->getType()->isSignedIntegerType()) {
            SanitizerScope SanScope(this);
            llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
            llvm::Constant *StaticArgs[] = {
              EmitCheckSourceLocation(size->getLocStart()),
              EmitCheckTypeDescriptor(size->getType())
            };
            EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero),
                                     SanitizerKind::VLABound),
                      "vla_bound_not_positive", StaticArgs, Size);
          }

          // Always zexting here would be wrong if it weren't
          // undefined behavior to have a negative bound.
          entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
        }
      }
      type = vat->getElementType();
      break;
    }

    case Type::FunctionProto:
    case Type::FunctionNoProto:
      type = cast<FunctionType>(ty)->getReturnType();
      break;

    case Type::Paren:
    case Type::TypeOf:
    case Type::UnaryTransform:
    case Type::Attributed:
    case Type::SubstTemplateTypeParm:
    case Type::PackExpansion:
      // Keep walking after single level desugaring.
      type = type.getSingleStepDesugaredType(getContext());
      break;

    case Type::Typedef:
    case Type::Decltype:
    case Type::Auto:
      // Stop walking: nothing to do.
      return;

    case Type::TypeOfExpr:
      // Stop walking: emit typeof expression.
      EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
      return;

    case Type::Atomic:
      type = cast<AtomicType>(ty)->getValueType();
      break;
    }
  } while (type->isVariablyModifiedType());
}

llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
  if (getContext().getBuiltinVaListType()->isArrayType())
    return EmitScalarExpr(E);
  return EmitLValue(E).getAddress();
}

void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
                                              llvm::Constant *Init) {
  assert(Init && "Invalid DeclRefExpr initializer!");
  if (CGDebugInfo *Dbg = getDebugInfo())
    if (CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo)
      Dbg->EmitGlobalVariable(E->getDecl(), Init);
}

CodeGenFunction::PeepholeProtection
CodeGenFunction::protectFromPeepholes(RValue rvalue) {
  // At the moment, the only aggressive peephole we do in IR gen
  // is trunc(zext) folding, but if we add more, we can easily
  // extend this protection.

  if (!rvalue.isScalar()) return PeepholeProtection();
  llvm::Value *value = rvalue.getScalarVal();
  if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();

  // Just make an extra bitcast.
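  // A bitcast to the same type is a no-op as far as optimization is
  // concerned, but it gives the zext an extra use, so the peephole cannot
  // zap the zext out from under the protected rvalue; unprotectFromPeepholes
  // simply erases the cast again.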
  assert(HaveInsertPoint());
  llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
                                                  Builder.GetInsertBlock());

  PeepholeProtection protection;
  protection.Inst = inst;
  return protection;
}

void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
  if (!protection.Inst) return;

  // In theory, we could try to duplicate the peepholes now, but it's
  // not worth the effort.
  protection.Inst->eraseFromParent();
}

llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Value *AnnotationFn,
                                                 llvm::Value *AnnotatedVal,
                                                 StringRef AnnotationStr,
                                                 SourceLocation Location) {
  llvm::Value *Args[4] = {
    AnnotatedVal,
    Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
    Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
    CGM.EmitAnnotationLineNo(Location)
  };
  return Builder.CreateCall(AnnotationFn, Args);
}

void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  // FIXME: We create a new bitcast for every annotation because that's what
  // llvm-gcc was doing.
  for (const auto *I : D->specific_attrs<AnnotateAttr>())
    EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
                       Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
                       I->getAnnotation(), D->getLocation());
}

llvm::Value *CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
                                                   llvm::Value *V) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  llvm::Type *VTy = V->getType();
  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
                                    CGM.Int8PtrTy);

  for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
    // FIXME: Always emit the cast inst so we can differentiate between
    // annotation on the first field of a struct and annotation on the struct
    // itself.
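    // The emitted IR is roughly (illustrative, for an i32 field):
    //   %0 = bitcast i32* %field to i8*
    //   %1 = call i8* @llvm.ptr.annotation.p0i8(i8* %0, i8* %str, i8* %unit,
    //                                           i32 %line)
    //   %2 = bitcast i8* %1 to i32*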
    if (VTy != CGM.Int8PtrTy)
      V = Builder.Insert(new llvm::BitCastInst(V, CGM.Int8PtrTy));
    V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation());
    V = Builder.CreateBitCast(V, VTy);
  }

  return V;
}

CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }

CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
    : CGF(CGF) {
  assert(!CGF->IsSanitizerScope);
  CGF->IsSanitizerScope = true;
}

CodeGenFunction::SanitizerScope::~SanitizerScope() {
  CGF->IsSanitizerScope = false;
}

void CodeGenFunction::InsertHelper(llvm::Instruction *I,
                                   const llvm::Twine &Name,
                                   llvm::BasicBlock *BB,
                                   llvm::BasicBlock::iterator InsertPt) const {
  LoopStack.InsertHelper(I);
  if (IsSanitizerScope)
    CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I);
}

template <bool PreserveNames>
void CGBuilderInserter<PreserveNames>::InsertHelper(
    llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
    llvm::BasicBlock::iterator InsertPt) const {
  llvm::IRBuilderDefaultInserter<PreserveNames>::InsertHelper(I, Name, BB,
                                                              InsertPt);
  if (CGF)
    CGF->InsertHelper(I, Name, BB, InsertPt);
}

// In asserts builds, keep instruction names so the emitted IR is easier to
// read; in release (NDEBUG) builds, drop them to save memory. Only the
// matching configuration is explicitly instantiated.
#ifdef NDEBUG
#define PreserveNames false
#else
#define PreserveNames true
#endif
template void CGBuilderInserter<PreserveNames>::InsertHelper(
    llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
    llvm::BasicBlock::iterator InsertPt) const;
#undef PreserveNames