// CGDecl.cpp — revision 830937bc1100fba7682f7c32c40512085870f50c
1//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This contains code to emit Decl nodes as LLVM code. 11// 12//===----------------------------------------------------------------------===// 13 14#include "CGDebugInfo.h" 15#include "CodeGenFunction.h" 16#include "CodeGenModule.h" 17#include "clang/AST/ASTContext.h" 18#include "clang/AST/CharUnits.h" 19#include "clang/AST/Decl.h" 20#include "clang/AST/DeclObjC.h" 21#include "clang/Basic/SourceManager.h" 22#include "clang/Basic/TargetInfo.h" 23#include "clang/Frontend/CodeGenOptions.h" 24#include "llvm/GlobalVariable.h" 25#include "llvm/Intrinsics.h" 26#include "llvm/Target/TargetData.h" 27#include "llvm/Type.h" 28using namespace clang; 29using namespace CodeGen; 30 31 32void CodeGenFunction::EmitDecl(const Decl &D) { 33 switch (D.getKind()) { 34 case Decl::TranslationUnit: 35 case Decl::Namespace: 36 case Decl::UnresolvedUsingTypename: 37 case Decl::ClassTemplateSpecialization: 38 case Decl::ClassTemplatePartialSpecialization: 39 case Decl::TemplateTypeParm: 40 case Decl::UnresolvedUsingValue: 41 case Decl::NonTypeTemplateParm: 42 case Decl::CXXMethod: 43 case Decl::CXXConstructor: 44 case Decl::CXXDestructor: 45 case Decl::CXXConversion: 46 case Decl::Field: 47 case Decl::IndirectField: 48 case Decl::ObjCIvar: 49 case Decl::ObjCAtDefsField: 50 case Decl::ParmVar: 51 case Decl::ImplicitParam: 52 case Decl::ClassTemplate: 53 case Decl::FunctionTemplate: 54 case Decl::TemplateTemplateParm: 55 case Decl::ObjCMethod: 56 case Decl::ObjCCategory: 57 case Decl::ObjCProtocol: 58 case Decl::ObjCInterface: 59 case Decl::ObjCCategoryImpl: 60 case Decl::ObjCImplementation: 61 case Decl::ObjCProperty: 62 case Decl::ObjCCompatibleAlias: 63 case 
Decl::AccessSpec: 64 case Decl::LinkageSpec: 65 case Decl::ObjCPropertyImpl: 66 case Decl::ObjCClass: 67 case Decl::ObjCForwardProtocol: 68 case Decl::FileScopeAsm: 69 case Decl::Friend: 70 case Decl::FriendTemplate: 71 case Decl::Block: 72 73 assert(0 && "Declaration not should not be in declstmts!"); 74 case Decl::Function: // void X(); 75 case Decl::Record: // struct/union/class X; 76 case Decl::Enum: // enum X; 77 case Decl::EnumConstant: // enum ? { X = ? } 78 case Decl::CXXRecord: // struct/union/class X; [C++] 79 case Decl::Using: // using X; [C++] 80 case Decl::UsingShadow: 81 case Decl::UsingDirective: // using namespace X; [C++] 82 case Decl::NamespaceAlias: 83 case Decl::StaticAssert: // static_assert(X, ""); [C++0x] 84 // None of these decls require codegen support. 85 return; 86 87 case Decl::Var: { 88 const VarDecl &VD = cast<VarDecl>(D); 89 assert(VD.isLocalVarDecl() && 90 "Should not see file-scope variables inside a function!"); 91 return EmitVarDecl(VD); 92 } 93 94 case Decl::Typedef: { // typedef int X; 95 const TypedefDecl &TD = cast<TypedefDecl>(D); 96 QualType Ty = TD.getUnderlyingType(); 97 98 if (Ty->isVariablyModifiedType()) 99 EmitVLASize(Ty); 100 } 101 } 102} 103 104/// EmitVarDecl - This method handles emission of any variable declaration 105/// inside a function, including static vars etc. 106void CodeGenFunction::EmitVarDecl(const VarDecl &D) { 107 if (D.hasAttr<AsmLabelAttr>()) 108 CGM.ErrorUnsupported(&D, "__asm__"); 109 110 switch (D.getStorageClass()) { 111 case SC_None: 112 case SC_Auto: 113 case SC_Register: 114 return EmitAutoVarDecl(D); 115 case SC_Static: { 116 llvm::GlobalValue::LinkageTypes Linkage = 117 llvm::GlobalValue::InternalLinkage; 118 119 // If the function definition has some sort of weak linkage, its 120 // static variables should also be weak so that they get properly 121 // uniqued. We can't do this in C, though, because there's no 122 // standard way to agree on which variables are the same (i.e. 
/// GetStaticDeclName - Compute the name for the object backing a
/// function-local static (or block-local static). In C++ this is simply the
/// mangled name of the declaration; in C (which has no mangling) the name is
/// built from the enclosing function/method/block name, the given separator,
/// and the declaration's own name.
static std::string GetStaticDeclName(CodeGenFunction &CGF, const VarDecl &D,
                                     const char *Separator) {
  CodeGenModule &CGM = CGF.CGM;
  if (CGF.getContext().getLangOptions().CPlusPlus) {
    llvm::StringRef Name = CGM.getMangledName(&D);
    return Name.str();
  }

  std::string ContextName;
  if (!CGF.CurFuncDecl) {
    // Better be in a block declared in global scope.
    const NamedDecl *ND = cast<NamedDecl>(&D);
    const DeclContext *DC = ND->getDeclContext();
    if (const BlockDecl *BD = dyn_cast<BlockDecl>(DC)) {
      MangleBuffer Name;
      CGM.getMangledName(GlobalDecl(), Name, BD);
      ContextName = Name.getString();
    }
    else
      assert(0 && "Unknown context for block static var decl");
  } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CGF.CurFuncDecl)) {
    llvm::StringRef Name = CGM.getMangledName(FD);
    ContextName = Name.str();
  } else if (isa<ObjCMethodDecl>(CGF.CurFuncDecl))
    // ObjC methods are not mangled here; reuse the LLVM function's name.
    ContextName = CGF.CurFn->getName();
  else
    assert(0 && "Unknown context for static var decl");

  return ContextName + Separator + D.getNameAsString();
}

/// CreateStaticVarDecl - Create (but do not initialize) the LLVM global that
/// backs a function-local static variable. The global is created with a null
/// initializer; any real initializer is attached later by
/// AddInitializerToStaticVarDecl.
llvm::GlobalVariable *
CodeGenFunction::CreateStaticVarDecl(const VarDecl &D,
                                     const char *Separator,
                                     llvm::GlobalValue::LinkageTypes Linkage) {
  QualType Ty = D.getType();
  assert(Ty->isConstantSizeType() && "VLAs can't be static");

  std::string Name = GetStaticDeclName(*this, D, Separator);

  const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(Ty);
  llvm::GlobalVariable *GV =
    new llvm::GlobalVariable(CGM.getModule(), LTy,
                             Ty.isConstant(getContext()), Linkage,
                             CGM.EmitNullConstant(D.getType()), Name, 0,
                             D.isThreadSpecified(), Ty.getAddressSpace());
  GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());
  // Statics that escaped internal linkage (e.g. statics in weak C++
  // functions, see EmitVarDecl) inherit the enclosing function's visibility.
  if (Linkage != llvm::GlobalValue::InternalLinkage)
    GV->setVisibility(CurFn->getVisibility());
  return GV;
}

/// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
/// global variable that has already been created for it. If the initializer
/// has a different type than GV does, this may free GV and return a different
/// one. Otherwise it just returns GV.
llvm::GlobalVariable *
CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
                                               llvm::GlobalVariable *GV) {
  llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(), D.getType(), this);

  // If constant emission failed, then this should be a C++ static
  // initializer.
  if (!Init) {
    if (!getContext().getLangOptions().CPlusPlus)
      CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
    else if (Builder.GetInsertBlock()) {
      // Since we have a static initializer, this global variable can't
      // be constant.
      GV->setConstant(false);

      // Emit the run-time (guarded) initialization instead.
      EmitCXXGuardedInit(D, GV);
    }
    return GV;
  }

  // The initializer may differ in type from the global. Rewrite
  // the global to match the initializer. (We have to do this
  // because some types, like unions, can't be completely represented
  // in the LLVM type system.)
  if (GV->getType()->getElementType() != Init->getType()) {
    llvm::GlobalVariable *OldGV = GV;

    GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
                                  OldGV->isConstant(),
                                  OldGV->getLinkage(), Init, "",
                                  /*InsertBefore*/ OldGV,
                                  D.isThreadSpecified(),
                                  D.getType().getAddressSpace());
    GV->setVisibility(OldGV->getVisibility());

    // Steal the name of the old global
    GV->takeName(OldGV);

    // Replace all uses of the old global with the new global
    llvm::Constant *NewPtrForOldDecl =
      llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
    OldGV->replaceAllUsesWith(NewPtrForOldDecl);

    // Erase the old global, since it is no longer used.
    OldGV->eraseFromParent();
  }

  GV->setInitializer(Init);
  return GV;
}
/// EmitStaticVarDecl - Emit a function-local static variable: create (or
/// find) its backing global, record it in LocalDeclMap, attach its
/// initializer if any, and apply attributes (annotate/section/used) plus
/// debug info.
void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
                                      llvm::GlobalValue::LinkageTypes Linkage) {
  llvm::Value *&DMEntry = LocalDeclMap[&D];
  assert(DMEntry == 0 && "Decl already exists in localdeclmap!");

  llvm::GlobalVariable *GV = CreateStaticVarDecl(D, ".", Linkage);

  // Store into LocalDeclMap before generating initializer to handle
  // circular references.
  DMEntry = GV;

  // We can't have a VLA here, but we can have a pointer to a VLA,
  // even though that doesn't really make any sense.
  // Make sure to evaluate VLA bounds now so that we have them for later.
  if (D.getType()->isVariablyModifiedType())
    EmitVLASize(D.getType());

  // Local static block variables must be treated as globals as they may be
  // referenced in their RHS initializer block-literal expression.
  CGM.setStaticLocalDeclAddress(&D, GV);

  // If this value has an initializer, emit it.
  if (D.getInit())
    GV = AddInitializerToStaticVarDecl(D, GV);

  GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());

  // FIXME: Merge attribute handling.
  if (const AnnotateAttr *AA = D.getAttr<AnnotateAttr>()) {
    SourceManager &SM = CGM.getContext().getSourceManager();
    llvm::Constant *Ann =
      CGM.EmitAnnotateAttr(GV, AA,
                           SM.getInstantiationLineNumber(D.getLocation()));
    CGM.AddAnnotation(Ann);
  }

  if (const SectionAttr *SA = D.getAttr<SectionAttr>())
    GV->setSection(SA->getName());

  if (D.hasAttr<UsedAttr>())
    CGM.AddUsedGlobal(GV);

  // We may have to cast the constant because of the initializer
  // mismatch above.
  //
  // FIXME: It is really dangerous to store this in the map; if anyone
  // RAUW's the GV uses of this constant will be invalid.
  const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(D.getType());
  const llvm::Type *LPtrTy = LTy->getPointerTo(D.getType().getAddressSpace());
  DMEntry = llvm::ConstantExpr::getBitCast(GV, LPtrTy);

  // Emit global variable debug descriptor for static vars.
  CGDebugInfo *DI = getDebugInfo();
  if (DI) {
    DI->setLocation(D.getLocation());
    DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(GV), &D);
  }
}

/// getByRefValueLLVMField - Return the struct-field index of the actual
/// value ('T x') inside the byref struct built by BuildByRefType for VD.
unsigned CodeGenFunction::getByRefValueLLVMField(const ValueDecl *VD) const {
  assert(ByRefValueInfo.count(VD) && "Did not find value!");

  return ByRefValueInfo.find(VD)->second.second;
}

/// BuildByRefType - This routine changes a __block variable declared as T x
/// into:
///
/// struct {
///   void *__isa;
///   void *__forwarding;
///   int32_t __flags;
///   int32_t __size;
///   void *__copy_helper;    // only if needed
///   void *__destroy_helper; // only if needed
///   char padding[X];        // only if needed
///   T x;
/// } x
///
const llvm::Type *CodeGenFunction::BuildByRefType(const ValueDecl *D) {
  // Cached per declaration: (struct type, index of the 'T x' field).
  std::pair<const llvm::Type *, unsigned> &Info = ByRefValueInfo[D];
  if (Info.first)
    return Info.first;

  QualType Ty = D->getType();

  std::vector<const llvm::Type *> Types;

  const llvm::PointerType *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);

  // Use an opaque placeholder so __forwarding can point at the struct that
  // is still being laid out.
  llvm::PATypeHolder ByRefTypeHolder = llvm::OpaqueType::get(VMContext);

  // void *__isa;
  Types.push_back(Int8PtrTy);

  // void *__forwarding;
  Types.push_back(llvm::PointerType::getUnqual(ByRefTypeHolder));

  // int32_t __flags;
  Types.push_back(Int32Ty);

  // int32_t __size;
  Types.push_back(Int32Ty);

  bool HasCopyAndDispose = BlockRequiresCopying(Ty);
  if (HasCopyAndDispose) {
    /// void *__copy_helper;
    Types.push_back(Int8PtrTy);

    /// void *__destroy_helper;
    Types.push_back(Int8PtrTy);
  }

  bool Packed = false;
  CharUnits Align = getContext().getDeclAlign(D);
  if (Align > CharUnits::fromQuantity(Target.getPointerAlign(0) / 8)) {
    // We have to insert padding.

    // The struct above has 2 32-bit integers.
    unsigned CurrentOffsetInBytes = 4 * 2;

    // And either 2 or 4 pointers.
    CurrentOffsetInBytes += (HasCopyAndDispose ? 4 : 2) *
      CGM.getTargetData().getTypeAllocSize(Int8PtrTy);

    // Align the offset.
    unsigned AlignedOffsetInBytes =
      llvm::RoundUpToAlignment(CurrentOffsetInBytes, Align.getQuantity());

    unsigned NumPaddingBytes = AlignedOffsetInBytes - CurrentOffsetInBytes;
    if (NumPaddingBytes > 0) {
      const llvm::Type *Ty = llvm::Type::getInt8Ty(VMContext);
      // FIXME: We need a sema error for alignment larger than the minimum of
      // the maximal stack alignment and the alignment of malloc on the system.
      if (NumPaddingBytes > 1)
        Ty = llvm::ArrayType::get(Ty, NumPaddingBytes);

      Types.push_back(Ty);

      // We want a packed struct.
      Packed = true;
    }
  }

  // T x;
  Types.push_back(ConvertTypeForMem(Ty));

  const llvm::Type *T = llvm::StructType::get(VMContext, Types, Packed);

  // Resolve the placeholder now that layout is complete, and name the type.
  cast<llvm::OpaqueType>(ByRefTypeHolder.get())->refineAbstractTypeTo(T);
  CGM.getModule().addTypeName("struct.__block_byref_" + D->getNameAsString(),
                              ByRefTypeHolder.get());

  Info.first = ByRefTypeHolder.get();

  // The value field is always the last member pushed above.
  Info.second = Types.size() - 1;

  return Info.first;
}
namespace {
  /// CallArrayDtor - EH-stack cleanup that destroys every element of a
  /// constant-sized array of objects with non-trivial destructors.
  struct CallArrayDtor : EHScopeStack::Cleanup {
    CallArrayDtor(const CXXDestructorDecl *Dtor,
                  const ConstantArrayType *Type,
                  llvm::Value *Loc)
      : Dtor(Dtor), Type(Type), Loc(Loc) {}

    const CXXDestructorDecl *Dtor;
    const ConstantArrayType *Type;
    llvm::Value *Loc;

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      QualType BaseElementTy = CGF.getContext().getBaseElementType(Type);
      const llvm::Type *BasePtr = CGF.ConvertType(BaseElementTy);
      BasePtr = llvm::PointerType::getUnqual(BasePtr);
      // The destruction helper wants a pointer to the base element type.
      llvm::Value *BaseAddrPtr = CGF.Builder.CreateBitCast(Loc, BasePtr);
      CGF.EmitCXXAggrDestructorCall(Dtor, Type, BaseAddrPtr);
    }
  };

  /// CallVarDtor - EH-stack cleanup that destroys a single local variable.
  /// If the variable was the subject of the NRVO, the flag alloca records
  /// whether the optimization actually fired; when it did, the object has
  /// been returned and must not be destroyed on the normal exit path.
  struct CallVarDtor : EHScopeStack::Cleanup {
    CallVarDtor(const CXXDestructorDecl *Dtor,
                llvm::Value *NRVOFlag,
                llvm::Value *Loc)
      : Dtor(Dtor), NRVOFlag(NRVOFlag), Loc(Loc) {}

    const CXXDestructorDecl *Dtor;
    llvm::Value *NRVOFlag; // null when the variable is not NRVO'd
    llvm::Value *Loc;

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      // Along the exceptions path we always execute the dtor.
      bool NRVO = !IsForEH && NRVOFlag;

      llvm::BasicBlock *SkipDtorBB = 0;
      if (NRVO) {
        // If we exited via NRVO, we skip the destructor call.
        llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused");
        SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor");
        llvm::Value *DidNRVO = CGF.Builder.CreateLoad(NRVOFlag, "nrvo.val");
        CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
        CGF.EmitBlock(RunDtorBB);
      }

      CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                                /*ForVirtualBase=*/false, Loc);

      if (NRVO) CGF.EmitBlock(SkipDtorBB);
    }
  };
}

namespace {
  /// CallStackRestore - Cleanup that restores the stack pointer saved with
  /// llvm.stacksave, releasing any VLA allocations made in the scope.
  struct CallStackRestore : EHScopeStack::Cleanup {
    llvm::Value *Stack;
    CallStackRestore(llvm::Value *Stack) : Stack(Stack) {}
    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      llvm::Value *V = CGF.Builder.CreateLoad(Stack, "tmp");
      llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
      CGF.Builder.CreateCall(F, V);
    }
  };

  /// CallCleanupFunction - Cleanup implementing __attribute__((cleanup(f))):
  /// invokes the user's function with the address of the variable.
  struct CallCleanupFunction : EHScopeStack::Cleanup {
    llvm::Constant *CleanupFn;
    const CGFunctionInfo &FnInfo;
    llvm::Value *Addr;
    const VarDecl &Var;

    CallCleanupFunction(llvm::Constant *CleanupFn, const CGFunctionInfo *Info,
                        llvm::Value *Addr, const VarDecl *Var)
      : CleanupFn(CleanupFn), FnInfo(*Info), Addr(Addr), Var(*Var) {}

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      // In some cases, the type of the function argument will be different from
      // the type of the pointer. An example of this is
      //   void f(void* arg);
      //   __attribute__((cleanup(f))) void *g;
      //
      // To fix this we insert a bitcast here.
      QualType ArgTy = FnInfo.arg_begin()->type;
      llvm::Value *Arg =
        CGF.Builder.CreateBitCast(Addr, CGF.ConvertType(ArgTy));

      CallArgList Args;
      Args.push_back(std::make_pair(RValue::get(Arg),
                            CGF.getContext().getPointerType(Var.getType())));
      CGF.EmitCall(FnInfo, CleanupFn, ReturnValueSlot(), Args);
    }
  };

  /// CallBlockRelease - Cleanup that releases a __block variable's byref
  /// storage when its scope exits.
  struct CallBlockRelease : EHScopeStack::Cleanup {
    llvm::Value *Addr;
    CallBlockRelease(llvm::Value *Addr) : Addr(Addr) {}

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      // Release through the forwarding pointer (field 1), since the byref
      // struct may have been moved to the heap.
      llvm::Value *V = CGF.Builder.CreateStructGEP(Addr, 1, "forwarding");
      V = CGF.Builder.CreateLoad(V);
      CGF.BuildBlockRelease(V);
    }
  };
}


/// canEmitInitWithFewStoresAfterMemset - Decide whether we can emit the
/// non-zero parts of the specified initializer with equal or fewer than
/// NumStores scalar stores.
static bool canEmitInitWithFewStoresAfterMemset(llvm::Constant *Init,
                                                unsigned &NumStores) {
  // Zero and Undef never requires any extra stores.
  if (isa<llvm::ConstantAggregateZero>(Init) ||
      isa<llvm::ConstantPointerNull>(Init) ||
      isa<llvm::UndefValue>(Init))
    return true;
  // A non-zero leaf constant costs one store; short-circuiting keeps the
  // budget untouched for zero values. (When the budget is already 0 the
  // post-decrement wraps, but callers stop recursing as soon as this
  // returns false, so the wrapped value is never consulted.)
  if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
      isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
      isa<llvm::ConstantExpr>(Init))
    return Init->isNullValue() || NumStores--;

  // See if we can emit each element.
  if (isa<llvm::ConstantArray>(Init) || isa<llvm::ConstantStruct>(Init)) {
    for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
      llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
      if (!canEmitInitWithFewStoresAfterMemset(Elt, NumStores))
        return false;
    }
    return true;
  }

  // Anything else is hard and scary.
  return false;
}
536 return false; 537} 538 539/// emitStoresForInitAfterMemset - For inits that 540/// canEmitInitWithFewStoresAfterMemset returned true for, emit the scalar 541/// stores that would be required. 542static void emitStoresForInitAfterMemset(llvm::Constant *Init, llvm::Value *Loc, 543 CGBuilderTy &Builder) { 544 // Zero doesn't require any stores. 545 if (isa<llvm::ConstantAggregateZero>(Init) || 546 isa<llvm::ConstantPointerNull>(Init) || 547 isa<llvm::UndefValue>(Init)) 548 return; 549 550 if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) || 551 isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) || 552 isa<llvm::ConstantExpr>(Init)) { 553 if (!Init->isNullValue()) 554 Builder.CreateStore(Init, Loc); 555 return; 556 } 557 558 assert((isa<llvm::ConstantStruct>(Init) || isa<llvm::ConstantArray>(Init)) && 559 "Unknown value type!"); 560 561 for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) { 562 llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i)); 563 if (Elt->isNullValue()) continue; 564 565 // Otherwise, get a pointer to the element and emit it. 566 emitStoresForInitAfterMemset(Elt, Builder.CreateConstGEP2_32(Loc, 0, i), 567 Builder); 568 } 569} 570 571 572/// shouldUseMemSetPlusStoresToInitialize - Decide whether we should use memset 573/// plus some stores to initialize a local variable instead of using a memcpy 574/// from a constant global. It is beneficial to use memset if the global is all 575/// zeros, or mostly zeros and large. 576static bool shouldUseMemSetPlusStoresToInitialize(llvm::Constant *Init, 577 uint64_t GlobalSize) { 578 // If a global is all zeros, always use a memset. 579 if (isa<llvm::ConstantAggregateZero>(Init)) return true; 580 581 582 // If a non-zero global is <= 32 bytes, always use a memcpy. If it is large, 583 // do it if it will require 6 or fewer scalar stores. 584 // TODO: Should budget depends on the size? Avoiding a large global warrants 585 // plopping in more stores. 
586 unsigned StoreBudget = 6; 587 uint64_t SizeLimit = 32; 588 589 return GlobalSize > SizeLimit && 590 canEmitInitWithFewStoresAfterMemset(Init, StoreBudget); 591} 592 593 594/// EmitLocalVarDecl - Emit code and set up an entry in LocalDeclMap for a 595/// variable declaration with auto, register, or no storage class specifier. 596/// These turn into simple stack objects, or GlobalValues depending on target. 597void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D, 598 SpecialInitFn *SpecialInit) { 599 QualType Ty = D.getType(); 600 unsigned Alignment = getContext().getDeclAlign(&D).getQuantity(); 601 bool isByRef = D.hasAttr<BlocksAttr>(); 602 bool needsDispose = false; 603 CharUnits Align = CharUnits::Zero(); 604 bool IsSimpleConstantInitializer = false; 605 606 bool NRVO = false; 607 llvm::Value *NRVOFlag = 0; 608 llvm::Value *DeclPtr; 609 if (Ty->isConstantSizeType()) { 610 if (!Target.useGlobalsForAutomaticVariables()) { 611 NRVO = getContext().getLangOptions().ElideConstructors && 612 D.isNRVOVariable(); 613 // If this value is an array or struct, is POD, and if the initializer is 614 // a staticly determinable constant, try to optimize it (unless the NRVO 615 // is already optimizing this). 616 if (!NRVO && D.getInit() && !isByRef && 617 (Ty->isArrayType() || Ty->isRecordType()) && 618 Ty->isPODType() && 619 D.getInit()->isConstantInitializer(getContext(), false)) { 620 // If this variable is marked 'const', emit the value as a global. 621 if (CGM.getCodeGenOpts().MergeAllConstants && 622 Ty.isConstant(getContext())) { 623 EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage); 624 return; 625 } 626 627 IsSimpleConstantInitializer = true; 628 } 629 630 // A normal fixed sized variable becomes an alloca in the entry block, 631 // unless it's an NRVO variable. 
632 const llvm::Type *LTy = ConvertTypeForMem(Ty); 633 634 if (NRVO) { 635 // The named return value optimization: allocate this variable in the 636 // return slot, so that we can elide the copy when returning this 637 // variable (C++0x [class.copy]p34). 638 DeclPtr = ReturnValue; 639 640 if (const RecordType *RecordTy = Ty->getAs<RecordType>()) { 641 if (!cast<CXXRecordDecl>(RecordTy->getDecl())->hasTrivialDestructor()) { 642 // Create a flag that is used to indicate when the NRVO was applied 643 // to this variable. Set it to zero to indicate that NRVO was not 644 // applied. 645 llvm::Value *Zero = Builder.getFalse(); 646 NRVOFlag = CreateTempAlloca(Zero->getType(), "nrvo"); 647 Builder.CreateStore(Zero, NRVOFlag); 648 649 // Record the NRVO flag for this variable. 650 NRVOFlags[&D] = NRVOFlag; 651 } 652 } 653 } else { 654 if (isByRef) 655 LTy = BuildByRefType(&D); 656 657 llvm::AllocaInst *Alloc = CreateTempAlloca(LTy); 658 Alloc->setName(D.getNameAsString()); 659 660 Align = getContext().getDeclAlign(&D); 661 if (isByRef) 662 Align = std::max(Align, 663 CharUnits::fromQuantity(Target.getPointerAlign(0) / 8)); 664 Alloc->setAlignment(Align.getQuantity()); 665 DeclPtr = Alloc; 666 } 667 } else { 668 // Targets that don't support recursion emit locals as globals. 669 const char *Class = 670 D.getStorageClass() == SC_Register ? ".reg." : ".auto."; 671 DeclPtr = CreateStaticVarDecl(D, Class, 672 llvm::GlobalValue::InternalLinkage); 673 } 674 675 // FIXME: Can this happen? 676 if (Ty->isVariablyModifiedType()) 677 EmitVLASize(Ty); 678 } else { 679 EnsureInsertPoint(); 680 681 if (!DidCallStackSave) { 682 // Save the stack. 
683 const llvm::Type *LTy = llvm::Type::getInt8PtrTy(VMContext); 684 llvm::Value *Stack = CreateTempAlloca(LTy, "saved_stack"); 685 686 llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave); 687 llvm::Value *V = Builder.CreateCall(F); 688 689 Builder.CreateStore(V, Stack); 690 691 DidCallStackSave = true; 692 693 // Push a cleanup block and restore the stack there. 694 EHStack.pushCleanup<CallStackRestore>(NormalCleanup, Stack); 695 } 696 697 // Get the element type. 698 const llvm::Type *LElemTy = ConvertTypeForMem(Ty); 699 const llvm::Type *LElemPtrTy = LElemTy->getPointerTo(Ty.getAddressSpace()); 700 701 llvm::Value *VLASize = EmitVLASize(Ty); 702 703 // Allocate memory for the array. 704 llvm::AllocaInst *VLA = 705 Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), VLASize, "vla"); 706 VLA->setAlignment(getContext().getDeclAlign(&D).getQuantity()); 707 708 DeclPtr = Builder.CreateBitCast(VLA, LElemPtrTy, "tmp"); 709 } 710 711 llvm::Value *&DMEntry = LocalDeclMap[&D]; 712 assert(DMEntry == 0 && "Decl already exists in localdeclmap!"); 713 DMEntry = DeclPtr; 714 715 // Emit debug info for local var declaration. 716 if (CGDebugInfo *DI = getDebugInfo()) { 717 assert(HaveInsertPoint() && "Unexpected unreachable point!"); 718 719 DI->setLocation(D.getLocation()); 720 if (Target.useGlobalsForAutomaticVariables()) { 721 DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr), &D); 722 } else 723 DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder); 724 } 725 726 // If this local has an initializer, emit it now. 727 const Expr *Init = D.getInit(); 728 729 // If we are at an unreachable point, we don't need to emit the initializer 730 // unless it contains a label. 
731 if (!HaveInsertPoint()) { 732 if (!ContainsLabel(Init)) 733 Init = 0; 734 else 735 EnsureInsertPoint(); 736 } 737 738 if (isByRef) { 739 const llvm::PointerType *PtrToInt8Ty = llvm::Type::getInt8PtrTy(VMContext); 740 741 EnsureInsertPoint(); 742 llvm::Value *isa_field = Builder.CreateStructGEP(DeclPtr, 0); 743 llvm::Value *forwarding_field = Builder.CreateStructGEP(DeclPtr, 1); 744 llvm::Value *flags_field = Builder.CreateStructGEP(DeclPtr, 2); 745 llvm::Value *size_field = Builder.CreateStructGEP(DeclPtr, 3); 746 llvm::Value *V; 747 int flag = 0; 748 int flags = 0; 749 750 needsDispose = true; 751 752 if (Ty->isBlockPointerType()) { 753 flag |= BLOCK_FIELD_IS_BLOCK; 754 flags |= BLOCK_HAS_COPY_DISPOSE; 755 } else if (getContext().isObjCNSObjectType(Ty) || 756 Ty->isObjCObjectPointerType()) { 757 flag |= BLOCK_FIELD_IS_OBJECT; 758 flags |= BLOCK_HAS_COPY_DISPOSE; 759 } else if (getContext().getBlockVarCopyInits(&D)) { 760 flag |= BLOCK_HAS_CXX_OBJ; 761 flags |= BLOCK_HAS_COPY_DISPOSE; 762 } 763 764 // FIXME: Someone double check this. 
765 if (Ty.isObjCGCWeak()) 766 flag |= BLOCK_FIELD_IS_WEAK; 767 768 int isa = 0; 769 if (flag & BLOCK_FIELD_IS_WEAK) 770 isa = 1; 771 V = Builder.CreateIntToPtr(Builder.getInt32(isa), PtrToInt8Ty, "isa"); 772 Builder.CreateStore(V, isa_field); 773 774 Builder.CreateStore(DeclPtr, forwarding_field); 775 776 Builder.CreateStore(Builder.getInt32(flags), flags_field); 777 778 const llvm::Type *V1; 779 V1 = cast<llvm::PointerType>(DeclPtr->getType())->getElementType(); 780 V = Builder.getInt32(CGM.GetTargetTypeStoreSize(V1).getQuantity()); 781 Builder.CreateStore(V, size_field); 782 783 if (flags & BLOCK_HAS_COPY_DISPOSE) { 784 SynthesizeCopyDisposeHelpers = true; 785 llvm::Value *copy_helper = Builder.CreateStructGEP(DeclPtr, 4); 786 Builder.CreateStore(BuildbyrefCopyHelper(DeclPtr->getType(), flag, 787 Align.getQuantity(), &D), 788 copy_helper); 789 790 llvm::Value *destroy_helper = Builder.CreateStructGEP(DeclPtr, 5); 791 Builder.CreateStore(BuildbyrefDestroyHelper(DeclPtr->getType(), flag, 792 Align.getQuantity(), &D), 793 destroy_helper); 794 } 795 } 796 797 if (SpecialInit) { 798 SpecialInit(*this, D, DeclPtr); 799 } else if (Init) { 800 llvm::Value *Loc = DeclPtr; 801 if (isByRef) 802 Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D), 803 D.getNameAsString()); 804 805 bool isVolatile = getContext().getCanonicalType(Ty).isVolatileQualified(); 806 807 // If the initializer was a simple constant initializer, we can optimize it 808 // in various ways. 
809 if (IsSimpleConstantInitializer) { 810 llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(), Ty,this); 811 assert(Init != 0 && "Wasn't a simple constant init?"); 812 813 llvm::Value *AlignVal = Builder.getInt32(Align.getQuantity()); 814 llvm::Value *SizeVal = 815 llvm::ConstantInt::get(CGF.IntPtrTy, 816 getContext().getTypeSizeInChars(Ty).getQuantity()); 817 818 const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext); 819 if (Loc->getType() != BP) 820 Loc = Builder.CreateBitCast(Loc, BP, "tmp"); 821 822 llvm::Value *NotVolatile = Builder.getFalse(); 823 824 // If the initializer is all or mostly zeros, codegen with memset then do 825 // a few stores afterward. 826 if (shouldUseMemSetPlusStoresToInitialize(Init, 827 CGM.getTargetData().getTypeAllocSize(Init->getType()))) { 828 const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext); 829 830 Builder.CreateCall5(CGM.getMemSetFn(BP, SizeVal->getType()), 831 Loc, Builder.getInt8(0), SizeVal, AlignVal, 832 NotVolatile); 833 if (!Init->isNullValue()) { 834 Loc = Builder.CreateBitCast(Loc, Init->getType()->getPointerTo()); 835 emitStoresForInitAfterMemset(Init, Loc, Builder); 836 } 837 838 } else { 839 // Otherwise, create a temporary global with the initializer then 840 // memcpy from the global to the alloca. 
841 std::string Name = GetStaticDeclName(*this, D, "."); 842 llvm::GlobalVariable *GV = 843 new llvm::GlobalVariable(CGM.getModule(), Init->getType(), true, 844 llvm::GlobalValue::InternalLinkage, 845 Init, Name, 0, false, 0); 846 GV->setAlignment(Align.getQuantity()); 847 848 llvm::Value *SrcPtr = GV; 849 if (SrcPtr->getType() != BP) 850 SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp"); 851 852 Builder.CreateCall5(CGM.getMemCpyFn(Loc->getType(), SrcPtr->getType(), 853 SizeVal->getType()), 854 Loc, SrcPtr, SizeVal, AlignVal, NotVolatile); 855 } 856 } else if (Ty->isReferenceType()) { 857 RValue RV = EmitReferenceBindingToExpr(Init, &D); 858 EmitStoreOfScalar(RV.getScalarVal(), Loc, false, Alignment, Ty); 859 } else if (!hasAggregateLLVMType(Init->getType())) { 860 llvm::Value *V = EmitScalarExpr(Init); 861 EmitStoreOfScalar(V, Loc, isVolatile, Alignment, Ty); 862 } else if (Init->getType()->isAnyComplexType()) { 863 EmitComplexExprIntoAddr(Init, Loc, isVolatile); 864 } else { 865 EmitAggExpr(Init, AggValueSlot::forAddr(Loc, isVolatile, true, false)); 866 } 867 } 868 869 // Handle CXX destruction of variables. 870 QualType DtorTy(Ty); 871 while (const ArrayType *Array = getContext().getAsArrayType(DtorTy)) 872 DtorTy = getContext().getBaseElementType(Array); 873 if (const RecordType *RT = DtorTy->getAs<RecordType>()) 874 if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) { 875 if (!ClassDecl->hasTrivialDestructor()) { 876 // Note: We suppress the destructor call when the corresponding NRVO 877 // flag has been set. 
878 llvm::Value *Loc = DeclPtr; 879 if (isByRef) 880 Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D), 881 D.getNameAsString()); 882 883 const CXXDestructorDecl *D = ClassDecl->getDestructor(); 884 assert(D && "EmitLocalBlockVarDecl - destructor is nul"); 885 886 if (const ConstantArrayType *Array = 887 getContext().getAsConstantArrayType(Ty)) { 888 EHStack.pushCleanup<CallArrayDtor>(NormalAndEHCleanup, 889 D, Array, Loc); 890 } else { 891 EHStack.pushCleanup<CallVarDtor>(NormalAndEHCleanup, 892 D, NRVOFlag, Loc); 893 } 894 } 895 } 896 897 // Handle the cleanup attribute 898 if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) { 899 const FunctionDecl *FD = CA->getFunctionDecl(); 900 901 llvm::Constant* F = CGM.GetAddrOfFunction(FD); 902 assert(F && "Could not find function!"); 903 904 const CGFunctionInfo &Info = CGM.getTypes().getFunctionInfo(FD); 905 EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup, 906 F, &Info, DeclPtr, &D); 907 } 908 909 // If this is a block variable, clean it up. 910 if (needsDispose && CGM.getLangOptions().getGCMode() != LangOptions::GCOnly) 911 EHStack.pushCleanup<CallBlockRelease>(NormalAndEHCleanup, DeclPtr); 912} 913 914/// Emit an alloca (or GlobalValue depending on target) 915/// for the specified parameter and set up LocalDeclMap. 916void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg) { 917 // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl? 918 assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) && 919 "Invalid argument to EmitParmDecl"); 920 QualType Ty = D.getType(); 921 CanQualType CTy = getContext().getCanonicalType(Ty); 922 923 llvm::Value *DeclPtr; 924 // If this is an aggregate or variable sized value, reuse the input pointer. 925 if (!Ty->isConstantSizeType() || 926 CodeGenFunction::hasAggregateLLVMType(Ty)) { 927 DeclPtr = Arg; 928 } else { 929 // Otherwise, create a temporary to hold the value. 
930 DeclPtr = CreateMemTemp(Ty, D.getName() + ".addr"); 931 932 // Store the initial value into the alloca. 933 unsigned Alignment = getContext().getDeclAlign(&D).getQuantity(); 934 EmitStoreOfScalar(Arg, DeclPtr, CTy.isVolatileQualified(), Alignment, Ty); 935 } 936 Arg->setName(D.getName()); 937 938 llvm::Value *&DMEntry = LocalDeclMap[&D]; 939 assert(DMEntry == 0 && "Decl already exists in localdeclmap!"); 940 DMEntry = DeclPtr; 941 942 // Emit debug info for param declaration. 943 if (CGDebugInfo *DI = getDebugInfo()) { 944 DI->setLocation(D.getLocation()); 945 DI->EmitDeclareOfArgVariable(&D, DeclPtr, Builder); 946 } 947} 948