CodeGenFunction.cpp revision 377ecc7996dce6803f7b7b6208cab5e197c9c5b8
//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
  : CodeGenTypeCache(cgm), CGM(cgm),
    Target(CGM.getContext().getTargetInfo()),
    Builder(cgm.getModule().getContext()),
    AutoreleaseResult(false), BlockInfo(0), BlockPointer(0),
    LambdaThisCaptureField(0), NormalCleanupDest(0), NextCleanupDestIndex(1),
    FirstBlockInfo(0), EHResumeBlock(0), ExceptionSlot(0), EHSelectorSlot(0),
    DebugInfo(0), DisableDebugInfo(false), DidCallStackSave(false),
    IndirectBranch(0), SwitchInsn(0), CaseRangeBlock(0), UnreachableBlock(0),
    CXXABIThisDecl(0), CXXABIThisValue(0), CXXThisValue(0), CXXVTTDecl(0),
    CXXVTTValue(0), OutermostConditional(0), TerminateLandingPad(0),
    TerminateHandler(0), TrapBB(0) {

  CatchUndefined = getContext().getLangOpts().CatchUndefined;
  CGM.getCXXABI().getMangleContext().startNewFunction();
}

CodeGenFunction::~CodeGenFunction() {
  // If there are any unclaimed block infos, go ahead and destroy them
  // now.  This can happen if IR-gen gets clever and skips evaluating
  // something.
  if (FirstBlockInfo)
    destroyBlockInfos(FirstBlockInfo);
}


llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

bool CodeGenFunction::hasAggregateLLVMType(QualType type) {
  switch (type.getCanonicalType()->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.def"
    llvm_unreachable("non-canonical or dependent type in IR-generation");

  case Type::Builtin:
  case Type::Pointer:
  case Type::BlockPointer:
  case Type::LValueReference:
  case Type::RValueReference:
  case Type::MemberPointer:
  case Type::Vector:
  case Type::ExtVector:
  case Type::FunctionProto:
  case Type::FunctionNoProto:
  case Type::Enum:
  case Type::ObjCObjectPointer:
    return false;

  // Complexes, arrays, records, and Objective-C objects.
  case Type::Complex:
  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
  case Type::Record:
  case Type::ObjCObject:
  case Type::ObjCInterface:
    return true;

  // In IRGen, atomic types are just the underlying type
  case Type::Atomic:
    return hasAggregateLLVMType(type->getAs<AtomicType>()->getValueType());
  }
  llvm_unreachable("unknown type kind!");
}

void CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return;
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
      dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->use_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Reset insertion point, including debug location, and delete the branch.
      Builder.SetCurrentDebugLocation(BI->getDebugLoc());
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      return;
    }
  }

  // FIXME: We are at an unreachable point, there is no reason to emit the block
  // unless it has uses. However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock.getBlock());
}

static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty())
    return CGF.CurFn->getBasicBlockList().push_back(BB);
  delete BB;
}

void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  // Pop any cleanups that might have been associated with the
  // parameters.  Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  if (EHStack.stable_begin() != PrologueCleanupDepth)
    PopCleanupBlocks(PrologueCleanupDepth);

  // Emit function epilog (to return).
  EmitReturnBlock();

  if (ShouldInstrumentFunction())
    EmitFunctionInstrumentation("__cyg_profile_func_exit");

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo()) {
    DI->setLocation(EndLoc);
    DI->EmitFunctionEnd(Builder);
  }

  EmitFunctionEpilog(*CurFnInfo);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end of
  // the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = 0;
  Ptr->eraseFromParent();

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero entry PHI node, which is illegal, zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, EHResumeBlock);
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions)
    return false;
  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

/// EmitFunctionInstrumentation - Emit LLVM code to call the specified
/// instrumentation function with the current function and the call site, if
/// function instrumentation is enabled.
void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
  // void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
  llvm::PointerType *PointerTy = Int8PtrTy;
  llvm::Type *ProfileFuncArgs[] = { PointerTy, PointerTy };
  llvm::FunctionType *FunctionTy =
    llvm::FunctionType::get(VoidTy, ProfileFuncArgs, false);

  llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
  llvm::CallInst *CallSite = Builder.CreateCall(
    CGM.getIntrinsic(llvm::Intrinsic::returnaddress),
    llvm::ConstantInt::get(Int32Ty, 0),
    "callsite");

  Builder.CreateCall2(F,
                      llvm::ConstantExpr::getBitCast(CurFn, PointerTy),
                      CallSite);
}

void CodeGenFunction::EmitMCountInstrumentation() {
  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);

  llvm::Constant *MCountFn = CGM.CreateRuntimeFunction(FTy,
                                                       Target.getMCountName());
  Builder.CreateCall(MCountFn);
}

void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation StartLoc) {
  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = CurFuncDecl = D;
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // Pass inline keyword to optimizer if it appears explicitly on any
  // declaration.
  if (!CGM.getCodeGenOpts().NoInline)
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      for (FunctionDecl::redecl_iterator RI = FD->redecls_begin(),
             RE = FD->redecls_end(); RI != RE; ++RI)
        if (RI->isInlineSpecified()) {
          Fn->addFnAttr(llvm::Attribute::InlineHint);
          break;
        }

  if (getContext().getLangOpts().OpenCL) {
    // Add metadata for a kernel function.
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      if (FD->hasAttr<OpenCLKernelAttr>()) {
        llvm::LLVMContext &Context = getLLVMContext();
        llvm::NamedMDNode *OpenCLMetadata =
          CGM.getModule().getOrInsertNamedMetadata("opencl.kernels");

        llvm::Value *Op = Fn;
        OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Op));
      }
  }

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entryblock
  // later.  Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
  if (Builder.isNamePreserving())
    AllocaInsertPt->setName("allocapt");

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    unsigned NumArgs = 0;
    QualType *ArgsArray = new QualType[Args.size()];
    for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
         i != e; ++i) {
      ArgsArray[NumArgs++] = (*i)->getType();
    }

    QualType FnType =
      getContext().getFunctionType(RetTy, ArgsArray, NumArgs,
                                   FunctionProtoType::ExtProtoInfo());

    delete[] ArgsArray;

    DI->setLocation(StartLoc);
    DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
  }

  if (ShouldInstrumentFunction())
    EmitFunctionInstrumentation("__cyg_profile_func_enter");

  if (CGM.getCodeGenOpts().InstrumentForProfiling)
    EmitMCountInstrumentation();

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = 0;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
             hasAggregateLLVMType(CurFnInfo->getReturnType())) {
    // Indirect aggregate return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    ReturnValue = CurFn->arg_begin();
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");

    // Tell the epilog emitter to autorelease the result.  We do this
    // now so that various specialized functions can suppress it
    // during their IR-generation.
    if (getLangOpts().ObjCAutoRefCount &&
        !CurFnInfo->isReturnsRetained() &&
        RetTy->isObjCRetainableType())
      AutoreleaseResult = true;
  }

  EmitStartEHSpec(CurCodeDecl);

  PrologueCleanupDepth = EHStack.stable_begin();
  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
    CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
    const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
    if (MD->getParent()->isLambda() &&
        MD->getOverloadedOperator() == OO_Call) {
      // We're in a lambda; figure out the captures.
      MD->getParent()->getCaptureFields(LambdaCaptureFields,
                                        LambdaThisCaptureField);
      if (LambdaThisCaptureField) {
        // If this lambda captures this, load it.
        QualType LambdaTagType =
          getContext().getTagDeclType(LambdaThisCaptureField->getParent());
        LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue,
                                                     LambdaTagType);
        LValue ThisLValue = EmitLValueForField(LambdaLV,
                                               LambdaThisCaptureField);
        CXXThisValue = EmitLoadOfLValue(ThisLValue).getScalarVal();
      }
    } else {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'?  The
      // fast register allocator would be happier...
      CXXThisValue = CXXABIThisValue;
    }
  }

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    QualType Ty = (*i)->getType();

    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
  }
  // Emit a location at the end of the prologue.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, StartLoc);
}

void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args) {
  const FunctionDecl *FD = cast<FunctionDecl>(CurGD.getDecl());
  assert(FD->getBody());
  EmitStmt(FD->getBody());
}

/// Tries to mark the given function nounwind based on the
/// non-existence of any throwing calls within it.  We believe this is
/// lightweight enough to do at -O0.
static void TryMarkNoThrow(llvm::Function *F) {
  // LLVM treats 'nounwind' on a function as part of the type, so we
  // can't do this on functions that can be overwritten.
  if (F->mayBeOverridden()) return;

  for (llvm::Function::iterator FI = F->begin(), FE = F->end(); FI != FE; ++FI)
    for (llvm::BasicBlock::iterator
           BI = FI->begin(), BE = FI->end(); BI != BE; ++BI)
      if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(&*BI)) {
        if (!Call->doesNotThrow())
          return;
      } else if (isa<llvm::ResumeInst>(&*BI)) {
        return;
      }
  F->setDoesNotThrow(true);
}

void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
                                   const CGFunctionInfo &FnInfo) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  // Check if we should generate debug info for this function.
  if (CGM.getModuleDebugInfo() && !FD->hasAttr<NoDebugAttr>())
    DebugInfo = CGM.getModuleDebugInfo();

  FunctionArgList Args;
  QualType ResTy = FD->getResultType();

  CurGD = GD;
  if (isa<CXXMethodDecl>(FD) && cast<CXXMethodDecl>(FD)->isInstance())
    CGM.getCXXABI().BuildInstanceFunctionParams(*this, ResTy, Args);

  for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
    Args.push_back(FD->getParamDecl(i));

  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange();

  // Emit the standard function prologue.
  StartFunction(GD, ResTy, Fn, FnInfo, Args, BodyRange.getBegin());

  // Generate the body of the function.
  if (isa<CXXDestructorDecl>(FD))
    EmitDestructorBody(Args);
  else if (isa<CXXConstructorDecl>(FD))
    EmitConstructorBody(Args);
  else if (getContext().getLangOpts().CUDA &&
           !CGM.getCodeGenOpts().CUDAIsDevice &&
           FD->hasAttr<CUDAGlobalAttr>())
    CGM.getCUDARuntime().EmitDeviceStubBody(*this, Args);
  else if (isa<CXXConversionDecl>(FD) &&
           cast<CXXConversionDecl>(FD)->isLambdaToBlockPointerConversion()) {
    // The lambda conversion to block pointer is special; the semantics can't be
    // expressed in the AST, so IRGen needs to special-case it.
    EmitLambdaToBlockPointerBody(Args);
  } else if (isa<CXXMethodDecl>(FD) &&
             cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
    // The lambda "__invoke" function is special, because it forwards or
    // clones the body of the function call operator (but is actually static).
    EmitLambdaStaticInvokeFunction(cast<CXXMethodDecl>(FD));
  }
  else
    EmitFunctionBody(Args);

  // Emit the standard function epilogue.
  FinishFunction(BodyRange.getEnd());

  // If we haven't marked the function nothrow through other means, do
  // a quick pass now to see if we can.
  if (!CurFn->doesNotThrow())
    TryMarkNoThrow(CurFn);
}

/// ContainsLabel - Return true if the statement contains a label in it.  If
/// this statement is not executed normally, it not containing a label means
/// that we can just remove the code.
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
  // Null statement, not a label!
  if (S == 0) return false;

  // If this is a label, we have to emit the code, consider something like:
  // if (0) {  ...   foo:  bar(); }  goto foo;
  //
  // TODO: If anyone cared, we could track __label__'s, since we know that you
  // can't jump to one from outside their declared region.
  if (isa<LabelStmt>(S))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we have
  // to emit the code.
  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore cases below it.
  if (isa<SwitchStmt>(S))
    IgnoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  for (Stmt::const_child_range I = S->children(); I; ++I)
    if (ContainsLabel(*I, IgnoreCaseStmts))
      return true;

  return false;
}

/// containsBreak - Return true if the statement contains a break out of it.
/// If the statement (recursively) contains a switch or loop with a break
/// inside of it, this is fine.
bool CodeGenFunction::containsBreak(const Stmt *S) {
  // Null statement, not a label!
  if (S == 0) return false;

  // If this is a switch or loop that defines its own break scope, then we can
  // include it and anything inside of it.
  if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
      isa<ForStmt>(S))
    return false;

  if (isa<BreakStmt>(S))
    return true;

  // Scan subexpressions for verboten breaks.
  for (Stmt::const_child_range I = S->children(); I; ++I)
    if (containsBreak(*I))
      return true;

  return false;
}


/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false.  If it
/// constant folds return true and set the boolean result in Result.
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
                                                   bool &ResultBool) {
  llvm::APInt ResultInt;
  if (!ConstantFoldsToSimpleInteger(Cond, ResultInt))
    return false;

  ResultBool = ResultInt.getBoolValue();
  return true;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false.  If it
/// constant folds return true and set the folded value.
bool CodeGenFunction::
ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APInt &ResultInt) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  llvm::APSInt Int;
  if (!Cond->EvaluateAsInt(Int, getContext()))
    return false;  // Not foldable, not integer or not fully evaluatable.

  if (CodeGenFunction::ContainsLabel(Cond))
    return false;  // Contains a label.

  ResultInt = Int;
  return true;
}



/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
/// statement) to the specified blocks.  Based on the condition, this might try
/// to simplify the codegen of the conditional based on the branch.
///
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
                                           llvm::BasicBlock *TrueBlock,
                                           llvm::BasicBlock *FalseBlock) {
  Cond = Cond->IgnoreParens();

  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
    // Handle X && Y in a condition.
    if (CondBOp->getOpcode() == BO_LAnd) {
      // If we have "1 && X", simplify the code.  "0 && X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          ConstantBool) {
        // br(1 && X) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      }

      // If we have "X && 1", simplify the code to use an uncond branch.
      // "X && 0" would have been constant folded to 0.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          ConstantBool) {
        // br(X && 1) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is false, we
      // want to jump to the FalseBlock.
      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");

      ConditionalEvaluation eval(*this);
      EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock);
      EmitBlock(LHSTrue);

      // Any temporaries created here are conditional.
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      eval.end(*this);

      return;
    }

    if (CondBOp->getOpcode() == BO_LOr) {
      // If we have "0 || X", simplify the code.  "1 || X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          !ConstantBool) {
        // br(0 || X) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      }

      // If we have "X || 0", simplify the code to use an uncond branch.
      // "X || 1" would have been constant folded to 1.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          !ConstantBool) {
        // br(X || 0) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is true, we
      // want to jump to the TrueBlock.
      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");

      ConditionalEvaluation eval(*this);
      EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse);
      EmitBlock(LHSFalse);

      // Any temporaries created here are conditional.
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      eval.end(*this);

      return;
    }
  }

  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
    // br(!x, t, f) -> br(x, f, t)
    if (CondUOp->getOpcode() == UO_LNot)
      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock);
  }

  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
    // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
    llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
    llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");

    ConditionalEvaluation cond(*this);
    EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock);

    cond.begin(*this);
    EmitBlock(LHSBlock);
    EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock);
    cond.end(*this);

    cond.begin(*this);
    EmitBlock(RHSBlock);
    EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock);
    cond.end(*this);

    return;
  }

  // Emit the code with the fully general case.
  llvm::Value *CondV = EvaluateExprAsBool(Cond);
  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock);
}

/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type,
                                       bool OmitOnError) {
  CGM.ErrorUnsupported(S, Type, OmitOnError);
}

/// emitNonZeroVLAInit - Emit the "zero" initialization of a
/// variable-length array whose elements have a non-zero bit-pattern.
///
/// \param src - a char* pointing to the bit-pattern for a single
/// base element of the array
/// \param sizeInChars - the total size of the VLA, in chars
/// \param align - the total alignment of the VLA
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
                               llvm::Value *dest, llvm::Value *src,
                               llvm::Value *sizeInChars) {
  std::pair<CharUnits,CharUnits> baseSizeAndAlign
    = CGF.getContext().getTypeInfoInChars(baseType);

  CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *baseSizeInChars
    = llvm::ConstantInt::get(CGF.IntPtrTy, baseSizeAndAlign.first.getQuantity());

  llvm::Type *i8p = Builder.getInt8PtrTy();

  llvm::Value *begin = Builder.CreateBitCast(dest, i8p, "vla.begin");
  llvm::Value *end = Builder.CreateInBoundsGEP(dest, sizeInChars, "vla.end");

  llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
  llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");

  // Make a loop over the VLA.  C99 guarantees that the VLA element
  // count must be nonzero.
  CGF.EmitBlock(loopBB);

  llvm::PHINode *cur = Builder.CreatePHI(i8p, 2, "vla.cur");
  cur->addIncoming(begin, originBB);

  // memcpy the individual element bit-pattern.
  Builder.CreateMemCpy(cur, src, baseSizeInChars,
                       baseSizeAndAlign.second.getQuantity(),
                       /*volatile*/ false);

  // Go to the next element.
  llvm::Value *next = Builder.CreateConstInBoundsGEP1_32(cur, 1, "vla.next");

  // Leave if that's the end of the VLA.
  llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
  Builder.CreateCondBr(done, contBB, loopBB);
  cur->addIncoming(next, loopBB);

  CGF.EmitBlock(contBB);
}

void
CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
  // Ignore empty classes in C++.
  if (getContext().getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
        return;
    }
  }

  // Cast the dest ptr to the appropriate i8 pointer type.
  unsigned DestAS =
    cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
  llvm::Type *BP = Builder.getInt8PtrTy(DestAS);
  if (DestPtr->getType() != BP)
    DestPtr = Builder.CreateBitCast(DestPtr, BP);

  // Get size and alignment info for this aggregate.
  std::pair<CharUnits, CharUnits> TypeInfo =
    getContext().getTypeInfoInChars(Ty);
  CharUnits Size = TypeInfo.first;
  CharUnits Align = TypeInfo.second;

  llvm::Value *SizeVal;
  const VariableArrayType *vla;

  // Don't bother emitting a zero-byte memset.
  if (Size.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (const VariableArrayType *vlaType =
          dyn_cast_or_null<VariableArrayType>(
                                          getContext().getAsArrayType(Ty))) {
      QualType eltType;
      llvm::Value *numElts;
      llvm::tie(numElts, eltType) = getVLASize(vlaType);

      SizeVal = numElts;
      CharUnits eltSize = getContext().getTypeSizeInChars(eltType);
      if (!eltSize.isOne())
        SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
      vla = vlaType;
    } else {
      return;
    }
  } else {
    SizeVal = CGM.getSize(Size);
    vla = 0;
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  if (!CGM.getTypes().isZeroInitializable(Ty)) {
    // For a VLA, emit a single element, then splat that over the VLA.
    if (vla) Ty = getContext().getBaseElementType(vla);

    llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);

    llvm::GlobalVariable *NullVariable =
      new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
                               /*isConstant=*/true,
                               llvm::GlobalVariable::PrivateLinkage,
                               NullConstant, Twine());
    llvm::Value *SrcPtr =
      Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy());

    if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);

    // Get and call the appropriate llvm.memcpy overload.
    Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity(), false);
    return;
  }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal,
                       Align.getQuantity(), false);
}

llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
  // Make sure that there is a block for the indirect goto.
  if (IndirectBranch == 0)
    GetIndirectGotoBlock();

  llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();

  // Make sure the indirect branch includes all of the address-taken blocks.
  IndirectBranch->addDestination(BB);
  return llvm::BlockAddress::get(CurFn, BB);
}

llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
  // If we already made the indirect branch for indirect goto, return its block.
  if (IndirectBranch) return IndirectBranch->getParent();

  CGBuilderTy TmpBuilder(createBasicBlock("indirectgoto"));

  // Create the PHI node that indirect gotos will add entries to.
  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
                                              "indirect.goto.dest");

  // Create the indirect branch instruction.
  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
  return IndirectBranch->getParent();
}

/// Computes the length of an array in elements, as well as the base
/// element type and a properly-typed first element pointer.
llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
                                              QualType &baseType,
                                              llvm::Value *&addr) {
  const ArrayType *arrayType = origArrayType;

  // If it's a VLA, we have to load the stored size.  Note that
  // this is the size of the VLA in bytes, not its size in elements.
  llvm::Value *numVLAElements = 0;
  if (isa<VariableArrayType>(arrayType)) {
    numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).first;

    // Walk into all VLAs.  This doesn't require changes to addr,
    // which has type T* where T is the first non-VLA element type.
    do {
      QualType elementType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(elementType);

      // If we only have VLA components, 'addr' requires no adjustment.
      if (!arrayType) {
        baseType = elementType;
        return numVLAElements;
      }
    } while (isa<VariableArrayType>(arrayType));

    // We get out here only if we find a constant array type
    // inside the VLA.
  }

  // We have some number of constant-length arrays, so addr should
  // have LLVM type [M x [N x [...]]]*.  Build a GEP that walks
  // down to the first element of addr.
  SmallVector<llvm::Value*, 8> gepIndices;

  // GEP down to the array type.
  llvm::ConstantInt *zero = Builder.getInt32(0);
  gepIndices.push_back(zero);

  // It's more efficient to calculate the count from the LLVM
  // constant-length arrays than to re-evaluate the array bounds.
  uint64_t countFromCLAs = 1;

  llvm::ArrayType *llvmArrayType =
    cast<llvm::ArrayType>(
      cast<llvm::PointerType>(addr->getType())->getElementType());
  while (true) {
    assert(isa<ConstantArrayType>(arrayType));
    assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
             == llvmArrayType->getNumElements());

    gepIndices.push_back(zero);
    countFromCLAs *= llvmArrayType->getNumElements();

    llvmArrayType =
      dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
    if (!llvmArrayType) break;

    arrayType = getContext().getAsArrayType(arrayType->getElementType());
    assert(arrayType && "LLVM and Clang types are out-of-synch");
  }

  baseType = arrayType->getElementType();

  // Create the actual GEP.
  addr = Builder.CreateInBoundsGEP(addr, gepIndices, "array.begin");

  llvm::Value *numElements
    = llvm::ConstantInt::get(SizeTy, countFromCLAs);

  // If we had any VLA dimensions, factor them in.
  if (numVLAElements)
    numElements = Builder.CreateNUWMul(numVLAElements, numElements);

  return numElements;
}

std::pair<llvm::Value*, QualType>
CodeGenFunction::getVLASize(QualType type) {
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
  assert(vla && "type was not a variable array type!");
  return getVLASize(vla);
}

std::pair<llvm::Value*, QualType>
CodeGenFunction::getVLASize(const VariableArrayType *type) {
  // The number of elements so far; always size_t.
  llvm::Value *numElements = 0;

  QualType elementType;
  do {
    elementType = type->getElementType();
    llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
    assert(vlaSize && "no size for VLA!");
    assert(vlaSize->getType() == SizeTy);

    if (!numElements) {
      numElements = vlaSize;
    } else {
      // It's undefined behavior if this wraps around, so mark it that way.
      numElements = Builder.CreateNUWMul(numElements, vlaSize);
    }
  } while ((type = getContext().getAsVariableArrayType(elementType)));

  return std::pair<llvm::Value*,QualType>(numElements, elementType);
}

void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
  assert(type->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVLASizes!");

  EnsureInsertPoint();

  // We're going to walk down into the type and look for VLA
  // expressions.
  do {
    assert(type->isVariablyModifiedType());

    const Type *ty = type.getTypePtr();
    switch (ty->getTypeClass()) {

#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("unexpected dependent type!");

    // These types are never variably-modified.
    case Type::Builtin:
    case Type::Complex:
    case Type::Vector:
    case Type::ExtVector:
    case Type::Record:
    case Type::Enum:
    case Type::Elaborated:
    case Type::TemplateSpecialization:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ObjCObjectPointer:
      llvm_unreachable("type class is never variably-modified!");

    case Type::Pointer:
      type = cast<PointerType>(ty)->getPointeeType();
      break;

    case Type::BlockPointer:
      type = cast<BlockPointerType>(ty)->getPointeeType();
      break;

    case Type::LValueReference:
    case Type::RValueReference:
      type = cast<ReferenceType>(ty)->getPointeeType();
      break;

    case Type::MemberPointer:
      type = cast<MemberPointerType>(ty)->getPointeeType();
      break;

    case Type::ConstantArray:
    case Type::IncompleteArray:
      // Losing element qualification here is fine.
      type = cast<ArrayType>(ty)->getElementType();
      break;

    case Type::VariableArray: {
      // Losing element qualification here is fine.
      const VariableArrayType *vat = cast<VariableArrayType>(ty);

      // Unknown size indication requires no size computation.
      // Otherwise, evaluate and record it.
      if (const Expr *size = vat->getSizeExpr()) {
        // It's possible that we might have emitted this already,
        // e.g. with a typedef and a pointer to it.
        llvm::Value *&entry = VLASizeMap[size];
        if (!entry) {
          // Always zexting here would be wrong if it weren't
          // undefined behavior to have a negative bound.
          entry = Builder.CreateIntCast(EmitScalarExpr(size), SizeTy,
                                        /*signed*/ false);
        }
      }
      type = vat->getElementType();
      break;
    }

    case Type::FunctionProto:
    case Type::FunctionNoProto:
      type = cast<FunctionType>(ty)->getResultType();
      break;

    case Type::Paren:
    case Type::TypeOf:
    case Type::UnaryTransform:
    case Type::Attributed:
    case Type::SubstTemplateTypeParm:
      // Keep walking after single level desugaring.
      type = type.getSingleStepDesugaredType(getContext());
      break;

    case Type::Typedef:
    case Type::Decltype:
    case Type::Auto:
      // Stop walking: nothing to do.
      return;

    case Type::TypeOfExpr:
      // Stop walking: emit typeof expression.
      EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
      return;

    case Type::Atomic:
      type = cast<AtomicType>(ty)->getValueType();
      break;
    }
  } while (type->isVariablyModifiedType());
}

llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
  if (getContext().getBuiltinVaListType()->isArrayType())
    return EmitScalarExpr(E);
  return EmitLValue(E).getAddress();
}

void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
                                              llvm::Constant *Init) {
  assert (Init && "Invalid DeclRefExpr initializer!");
  if (CGDebugInfo *Dbg = getDebugInfo())
    Dbg->EmitGlobalVariable(E->getDecl(), Init);
}

CodeGenFunction::PeepholeProtection
CodeGenFunction::protectFromPeepholes(RValue rvalue) {
  // At the moment, the only aggressive peephole we do in IR gen
  // is trunc(zext) folding, but if we add more, we can easily
  // extend this protection.

  if (!rvalue.isScalar()) return PeepholeProtection();
  llvm::Value *value = rvalue.getScalarVal();
  if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();

  // Just make an extra bitcast.
  assert(HaveInsertPoint());
  llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
                                                  Builder.GetInsertBlock());

  PeepholeProtection protection;
  protection.Inst = inst;
  return protection;
}

void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
  if (!protection.Inst) return;

  // In theory, we could try to duplicate the peepholes now, but whatever.
  protection.Inst->eraseFromParent();
}

llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Value *AnnotationFn,
                                                 llvm::Value *AnnotatedVal,
                                                 llvm::StringRef AnnotationStr,
                                                 SourceLocation Location) {
  llvm::Value *Args[4] = {
    AnnotatedVal,
    Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
    Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
    CGM.EmitAnnotationLineNo(Location)
  };
  return Builder.CreateCall(AnnotationFn, Args);
}

void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  // FIXME We create a new bitcast for every annotation because that's what
  // llvm-gcc was doing.
  for (specific_attr_iterator<AnnotateAttr>
       ai = D->specific_attr_begin<AnnotateAttr>(),
       ae = D->specific_attr_end<AnnotateAttr>(); ai != ae; ++ai)
    EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
                       Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
                       (*ai)->getAnnotation(), D->getLocation());
}

llvm::Value *CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
                                                   llvm::Value *V) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  llvm::Type *VTy = V->getType();
  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
                                    CGM.Int8PtrTy);

  for (specific_attr_iterator<AnnotateAttr>
       ai = D->specific_attr_begin<AnnotateAttr>(),
       ae = D->specific_attr_end<AnnotateAttr>(); ai != ae; ++ai) {
    // FIXME Always emit the cast inst so we can differentiate between
    // annotation on the first field of a struct and annotation on the struct
    // itself.
    if (VTy != CGM.Int8PtrTy)
      V = Builder.Insert(new llvm::BitCastInst(V, CGM.Int8PtrTy));
    V = EmitAnnotationCall(F, V, (*ai)->getAnnotation(), D->getLocation());
    V = Builder.CreateBitCast(V, VTy);
  }

  return V;
}