CGBlocks.cpp revision b1a6e687967105bf1e18dfba196d0248e6700a4e
//===--- CGBlocks.cpp - Emit LLVM Code for declarations -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit blocks.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/DeclObjC.h"
#include "llvm/Module.h"
#include "llvm/Target/TargetData.h"
#include <algorithm>
#include <cstdio>

using namespace clang;
using namespace CodeGen;

/// BuildDescriptorBlockDecl - Emit the constant block-descriptor object for a
/// block literal: { reserved, size [, copy helper, destroy helper] }.  The
/// helper fields are only emitted when BlockHasCopyDispose is set.  Returns a
/// new internal-linkage constant global named "__block_descriptor_tmp".
/// Ty/NoteForHelper are forwarded to the copy/destroy helper generators and
/// may be null when no helpers are needed.
llvm::Constant *CodeGenFunction::
BuildDescriptorBlockDecl(bool BlockHasCopyDispose, uint64_t Size,
                         const llvm::StructType* Ty,
                         std::vector<HelperInfo> *NoteForHelper) {
  const llvm::Type *UnsignedLongTy
    = CGM.getTypes().ConvertType(getContext().UnsignedLongTy);
  llvm::Constant *C;
  std::vector<llvm::Constant*> Elts;

  // reserved
  C = llvm::ConstantInt::get(UnsignedLongTy, 0);
  Elts.push_back(C);

  // Size
  // FIXME: What is the right way to say this doesn't fit? We should give
  // a user diagnostic in that case. Better fix would be to change the
  // API to size_t.
  C = llvm::ConstantInt::get(UnsignedLongTy, Size);
  Elts.push_back(C);

  if (BlockHasCopyDispose) {
    // copy_func_helper_decl
    Elts.push_back(BuildCopyHelper(Ty, NoteForHelper));

    // destroy_func_decl
    Elts.push_back(BuildDestroyHelper(Ty, NoteForHelper));
  }

  C = llvm::ConstantStruct::get(VMContext, Elts, false);

  C = new llvm::GlobalVariable(CGM.getModule(), C->getType(), true,
                               llvm::GlobalValue::InternalLinkage,
                               C, "__block_descriptor_tmp");
  return C;
}

/// getNSConcreteGlobalBlock - Lazily create (once) and return a reference to
/// the runtime's _NSConcreteGlobalBlock class object, used as the isa of
/// global block literals.
llvm::Constant *BlockModule::getNSConcreteGlobalBlock() {
  if (NSConcreteGlobalBlock == 0)
    NSConcreteGlobalBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty,
                                                      "_NSConcreteGlobalBlock");
  return NSConcreteGlobalBlock;
}

/// getNSConcreteStackBlock - Lazily create (once) and return a reference to
/// the runtime's _NSConcreteStackBlock class object, used as the isa of
/// stack-allocated block literals.
llvm::Constant *BlockModule::getNSConcreteStackBlock() {
  if (NSConcreteStackBlock == 0)
    NSConcreteStackBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty,
                                                     "_NSConcreteStackBlock");
  return NSConcreteStackBlock;
}

/// CollectBlockDeclRefInfo - Recursively walk the statement tree under S and
/// record every BlockDeclRefExpr into Info, partitioned into the by-ref and
/// by-copy lists.  References to FunctionDecls are skipped (they need no
/// capture slot).
static void CollectBlockDeclRefInfo(const Stmt *S,
                                    CodeGenFunction::BlockInfo &Info) {
  for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
       I != E; ++I)
    if (*I)
      CollectBlockDeclRefInfo(*I, Info);

  if (const BlockDeclRefExpr *DE = dyn_cast<BlockDeclRefExpr>(S)) {
    // FIXME: Handle enums.
    if (isa<FunctionDecl>(DE->getDecl()))
      return;

    if (DE->isByRef())
      Info.ByRefDeclRefs.push_back(DE);
    else
      Info.ByCopyDeclRefs.push_back(DE);
  }
}

/// CanBlockBeGlobal - Given a BlockInfo struct, determines if a block can be
/// declared as a global variable instead of on the stack.  True when the
/// block captures nothing, by value or by reference.
static bool CanBlockBeGlobal(const CodeGenFunction::BlockInfo &Info) {
  return Info.ByRefDeclRefs.empty() && Info.ByCopyDeclRefs.empty();
}

// FIXME: Push most into CGM, passing down a few bits, like current function
// name.
/// BuildBlockLiteralTmp - Emit the block literal for BE.  When the block
/// captures nothing the literal is promoted to a constant global
/// ("__block_holder_tmp_N") with the global-block isa; otherwise a stack
/// temporary is allocated, the five fixed header fields are stored, and each
/// captured variable is copied (or its byref forwarding pointer stored) into
/// its slot.  Returns the literal bitcast to the block-pointer type of BE.
llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {

  std::string Name = CurFn->getName();
  CodeGenFunction::BlockInfo Info(0, Name.c_str());
  CollectBlockDeclRefInfo(BE->getBody(), Info);

  // Check if the block can be global.
  // FIXME: This test doesn't work for nested blocks yet. Longer term, I'd like
  // to just have one code path. We should move this function into CGM and pass
  // CGF, then we can just check to see if CGF is 0.
  // NOTE: the "0 &&" deliberately disables this path for now; the in-line
  // global-block optimization below is used instead.
  if (0 && CanBlockBeGlobal(Info))
    return CGM.GetAddrOfGlobalBlock(BE, Name.c_str());

  // The five fixed header fields: isa, flags, reserved, invoke, descriptor.
  std::vector<llvm::Constant*> Elts(5);
  llvm::Constant *C;
  llvm::Value *V;

  {
    // C = BuildBlockStructInitlist();
    unsigned int flags = BLOCK_HAS_DESCRIPTOR;

    // We run this first so that we set BlockHasCopyDispose from the entire
    // block literal.
    // __invoke
    uint64_t subBlockSize, subBlockAlign;
    llvm::SmallVector<const Expr *, 8> subBlockDeclRefDecls;
    bool subBlockHasCopyDispose = false;
    llvm::Function *Fn
      = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, CurFuncDecl,
                                                   LocalDeclMap,
                                                   subBlockSize,
                                                   subBlockAlign,
                                                   subBlockDeclRefDecls,
                                                   subBlockHasCopyDispose);
    BlockHasCopyDispose |= subBlockHasCopyDispose;
    Elts[3] = Fn;

    // FIXME: Don't use BlockHasCopyDispose, it is set more often then
    // necessary, for example: { ^{ __block int i; ^{ i = 1; }(); }(); }
    if (subBlockHasCopyDispose)
      flags |= BLOCK_HAS_COPY_DISPOSE;

    // __isa
    C = CGM.getNSConcreteStackBlock();
    C = llvm::ConstantExpr::getBitCast(C, PtrToInt8Ty);
    Elts[0] = C;

    // __flags
    const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
      CGM.getTypes().ConvertType(CGM.getContext().IntTy));
    C = llvm::ConstantInt::get(IntTy, flags);
    Elts[1] = C;

    // __reserved
    C = llvm::ConstantInt::get(IntTy, 0);
    Elts[2] = C;

    if (subBlockDeclRefDecls.size() == 0) {
      // No captures: the whole literal is constant, so emit it once as a
      // global with the global-block isa and flags.
      // __descriptor
      Elts[4] = BuildDescriptorBlockDecl(subBlockHasCopyDispose, subBlockSize, 0, 0);

      // Optimize to being a global block.
      Elts[0] = CGM.getNSConcreteGlobalBlock();
      Elts[1] = llvm::ConstantInt::get(IntTy, flags|BLOCK_IS_GLOBAL);

      C = llvm::ConstantStruct::get(VMContext, Elts, false);

      // "__block_holder_tmp_" (19 chars) + up to 11 digits + NUL fits in 32.
      char Name[32];
      sprintf(Name, "__block_holder_tmp_%d", CGM.getGlobalUniqueCount());
      C = new llvm::GlobalVariable(CGM.getModule(), C->getType(), true,
                                   llvm::GlobalValue::InternalLinkage,
                                   C, Name);
      QualType BPT = BE->getType();
      C = llvm::ConstantExpr::getBitCast(C, ConvertType(BPT));
      return C;
    }

    // Build the full literal struct type: 5 header slots plus one slot per
    // captured decl (including any padding entries getBlockOffset inserted).
    std::vector<const llvm::Type *> Types(5+subBlockDeclRefDecls.size());
    for (int i=0; i<4; ++i)
      Types[i] = Elts[i]->getType();
    Types[4] = PtrToInt8Ty;

    for (unsigned i=0; i < subBlockDeclRefDecls.size(); ++i) {
      const Expr *E = subBlockDeclRefDecls[i];
      const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E);
      QualType Ty = E->getType();
      if (BDRE && BDRE->isByRef()) {
        // __block variables are captured as a pointer to their byref struct.
        Types[i+5] = llvm::PointerType::get(BuildByRefType(BDRE->getDecl()), 0);
      } else
        Types[i+5] = ConvertType(Ty);
    }

    // Packed struct: getBlockOffset already laid out explicit padding.
    llvm::StructType *Ty = llvm::StructType::get(VMContext, Types, true);

    llvm::AllocaInst *A = CreateTempAlloca(Ty);
    A->setAlignment(subBlockAlign);
    V = A;

    std::vector<HelperInfo> NoteForHelper(subBlockDeclRefDecls.size());
    int helpersize = 0;

    // Store the four constant header fields (descriptor comes last, below).
    for (unsigned i=0; i<4; ++i)
      Builder.CreateStore(Elts[i], Builder.CreateStructGEP(V, i, "block.tmp"));

    for (unsigned i=0; i < subBlockDeclRefDecls.size(); ++i)
      {
        // FIXME: Push const down.
        Expr *E = const_cast<Expr*>(subBlockDeclRefDecls[i]);
        DeclRefExpr *DR;
        ValueDecl *VD;

        DR = dyn_cast<DeclRefExpr>(E);
        // Skip padding.  Plain DeclRefExprs in this list are the synthetic
        // padding entries pushed by getBlockOffset; real captures are
        // BlockDeclRefExprs (a subclass, so checked after this).
        if (DR) continue;

        BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E);
        VD = BDRE->getDecl();

        llvm::Value* Addr = Builder.CreateStructGEP(V, i+5, "tmp");
        NoteForHelper[helpersize].index = i+5;
        NoteForHelper[helpersize].RequiresCopying
          = BlockRequiresCopying(VD->getType());
        NoteForHelper[helpersize].flag
          = (VD->getType()->isBlockPointerType()
             ? BLOCK_FIELD_IS_BLOCK
             : BLOCK_FIELD_IS_OBJECT);

        if (LocalDeclMap[VD]) {
          // The variable lives in this function's frame.
          if (BDRE->isByRef()) {
            NoteForHelper[helpersize].flag = BLOCK_FIELD_IS_BYREF |
              // FIXME: Someone double check this.
              (VD->getType().isObjCGCWeak() ? BLOCK_FIELD_IS_WEAK : 0);
            // Store the byref struct's forwarding pointer into the slot.
            llvm::Value *Loc = LocalDeclMap[VD];
            Loc = Builder.CreateStructGEP(Loc, 1, "forwarding");
            Loc = Builder.CreateLoad(Loc, false);
            Builder.CreateStore(Loc, Addr);
            ++helpersize;
            continue;
          } else
            // Re-point the capture at the local copy instead of the
            // (possibly enclosing-block) BlockDeclRefExpr.
            E = new (getContext()) DeclRefExpr (cast<NamedDecl>(VD),
                                                VD->getType(), SourceLocation(),
                                                false, false);
        }
        if (BDRE->isByRef()) {
          NoteForHelper[helpersize].flag = BLOCK_FIELD_IS_BYREF |
            // FIXME: Someone double check this.
            (VD->getType().isObjCGCWeak() ? BLOCK_FIELD_IS_WEAK : 0);
          // Capture the address rather than the value.
          E = new (getContext())
            UnaryOperator(E, UnaryOperator::AddrOf,
                          getContext().getPointerType(E->getType()),
                          SourceLocation());
        }
        ++helpersize;

        RValue r = EmitAnyExpr(E, Addr, false);
        if (r.isScalar()) {
          llvm::Value *Loc = r.getScalarVal();
          const llvm::Type *Ty = Types[i+5];
          if (BDRE->isByRef()) {
            // E is now the address of the value field, instead, we want the
            // address of the actual ByRef struct.  We optimize this slightly
            // compared to gcc by not grabbing the forwarding slot as this must
            // be done during Block_copy for us, and we can postpone the work
            // until then.
            uint64_t offset = BlockDecls[BDRE->getDecl()];

            llvm::Value *BlockLiteral = LoadBlockStruct();

            Loc = Builder.CreateGEP(BlockLiteral,
                                    llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
                                                           offset),
                                    "block.literal");
            Ty = llvm::PointerType::get(Ty, 0);
            Loc = Builder.CreateBitCast(Loc, Ty);
            Loc = Builder.CreateLoad(Loc, false);
            // Loc = Builder.CreateBitCast(Loc, Ty);
          }
          Builder.CreateStore(Loc, Addr);
        } else if (r.isComplex())
          // FIXME: implement
          ErrorUnsupported(BE, "complex in block literal");
        else if (r.isAggregate())
          ; // Already created into the destination
        else
          assert (0 && "bad block variable");
        // FIXME: Ensure that the offset created by the backend for
        // the struct matches the previously computed offset in BlockDecls.
      }
    // Trim unused entries (padding slots never filled one in).
    NoteForHelper.resize(helpersize);

    // __descriptor
    llvm::Value *Descriptor = BuildDescriptorBlockDecl(subBlockHasCopyDispose,
                                                       subBlockSize, Ty,
                                                       &NoteForHelper);
    Descriptor = Builder.CreateBitCast(Descriptor, PtrToInt8Ty);
    Builder.CreateStore(Descriptor, Builder.CreateStructGEP(V, 4, "block.tmp"));
  }

  QualType BPT = BE->getType();
  return Builder.CreateBitCast(V, ConvertType(BPT));
}


/// getBlockDescriptorType - Lazily build (once) the LLVM type for the minimal
/// two-field block descriptor and register its name with the module.
const llvm::Type *BlockModule::getBlockDescriptorType() {
  if (BlockDescriptorType)
    return BlockDescriptorType;

  const llvm::Type *UnsignedLongTy =
    getTypes().ConvertType(getContext().UnsignedLongTy);

  // struct __block_descriptor {
  //   unsigned long reserved;
  //   unsigned long block_size;
  // };
  BlockDescriptorType = llvm::StructType::get(UnsignedLongTy->getContext(),
                                              UnsignedLongTy,
                                              UnsignedLongTy,
                                              NULL);

  getModule().addTypeName("struct.__block_descriptor",
                          BlockDescriptorType);

  return BlockDescriptorType;
}

/// getGenericBlockLiteralType - Lazily build (once) the LLVM type for the
/// generic five-field block literal header.
const llvm::Type *BlockModule::getGenericBlockLiteralType() {
  if (GenericBlockLiteralType)
    return
GenericBlockLiteralType;

  const llvm::Type *BlockDescPtrTy =
    llvm::PointerType::getUnqual(getBlockDescriptorType());

  const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
    getTypes().ConvertType(getContext().IntTy));

  // struct __block_literal_generic {
  //   void *__isa;
  //   int __flags;
  //   int __reserved;
  //   void (*__invoke)(void *);
  //   struct __block_descriptor *__descriptor;
  // };
  GenericBlockLiteralType = llvm::StructType::get(IntTy->getContext(),
                                                  PtrToInt8Ty,
                                                  IntTy,
                                                  IntTy,
                                                  PtrToInt8Ty,
                                                  BlockDescPtrTy,
                                                  NULL);

  getModule().addTypeName("struct.__block_literal_generic",
                          GenericBlockLiteralType);

  return GenericBlockLiteralType;
}

/// getGenericExtendedBlockLiteralType - Lazily build (once) the LLVM type for
/// the block literal header extended with the copy/dispose helper pointers.
const llvm::Type *BlockModule::getGenericExtendedBlockLiteralType() {
  if (GenericExtendedBlockLiteralType)
    return GenericExtendedBlockLiteralType;

  const llvm::Type *BlockDescPtrTy =
    llvm::PointerType::getUnqual(getBlockDescriptorType());

  const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
    getTypes().ConvertType(getContext().IntTy));

  // struct __block_literal_generic {
  //   void *__isa;
  //   int __flags;
  //   int __reserved;
  //   void (*__invoke)(void *);
  //   struct __block_descriptor *__descriptor;
  //   void *__copy_func_helper_decl;
  //   void *__destroy_func_decl;
  // };
  GenericExtendedBlockLiteralType = llvm::StructType::get(IntTy->getContext(),
                                                          PtrToInt8Ty,
                                                          IntTy,
                                                          IntTy,
                                                          PtrToInt8Ty,
                                                          BlockDescPtrTy,
                                                          PtrToInt8Ty,
                                                          PtrToInt8Ty,
                                                          NULL);

  getModule().addTypeName("struct.__block_literal_extended_generic",
                          GenericExtendedBlockLiteralType);

  return GenericExtendedBlockLiteralType;
}

/// BlockRequiresCopying - Forward to CodeGenModule's notion of whether a
/// captured value of type Ty needs a copy helper.
bool BlockFunction::BlockRequiresCopying(QualType Ty) {
  return CGM.BlockRequiresCopying(Ty);
}

/// EmitBlockCallExpr - Emit a call through a block pointer: bitcast the
/// callee to the generic block-literal layout, load the __invoke pointer
/// (field 3), pass the literal itself as the implicit first argument, then
/// call through the function pointer cast to the block's real signature.
RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E) {
  const BlockPointerType *BPT =
    E->getCallee()->getType()->getAs<BlockPointerType>();

  llvm::Value *Callee = EmitScalarExpr(E->getCallee());

  // Get a pointer to the generic block literal.
  const llvm::Type *BlockLiteralTy =
    llvm::PointerType::getUnqual(CGM.getGenericBlockLiteralType());

  // Bitcast the callee to a block literal.
  llvm::Value *BlockLiteral =
    Builder.CreateBitCast(Callee, BlockLiteralTy, "block.literal");

  // Get the function pointer from the literal.
  llvm::Value *FuncPtr = Builder.CreateStructGEP(BlockLiteral, 3, "tmp");

  BlockLiteral =
    Builder.CreateBitCast(BlockLiteral,
                          llvm::PointerType::getUnqual(
                            llvm::Type::getInt8Ty(VMContext)),
                          "tmp");

  // Add the block literal.
  QualType VoidPtrTy = getContext().getPointerType(getContext().VoidTy);
  CallArgList Args;
  Args.push_back(std::make_pair(RValue::get(BlockLiteral), VoidPtrTy));

  QualType FnType = BPT->getPointeeType();

  // And the rest of the arguments.
  EmitCallArgs(Args, FnType->getAs<FunctionProtoType>(),
               E->arg_begin(), E->arg_end());

  // Load the function.
  llvm::Value *Func = Builder.CreateLoad(FuncPtr, false, "tmp");

  QualType ResultType = FnType->getAs<FunctionType>()->getResultType();

  const CGFunctionInfo &FnInfo =
    CGM.getTypes().getFunctionInfo(ResultType, Args);

  // Cast the function pointer to the right type.
  const llvm::Type *BlockFTy =
    CGM.getTypes().GetFunctionType(FnInfo, false);

  const llvm::Type *BlockFTyPtr = llvm::PointerType::getUnqual(BlockFTy);
  Func = Builder.CreateBitCast(Func, BlockFTyPtr);

  // And call the block.
  return EmitCall(FnInfo, Func, Args);
}

/// GetAddrOfBlockDecl - Return the address of the slot (or, for __block
/// variables, of the value inside the byref struct) for a variable captured
/// by the current block, assigning it a literal offset on first use.
llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const BlockDeclRefExpr *E) {
  const ValueDecl *VD = E->getDecl();

  // Note: operator[] default-inserts 0, which doubles as "not yet allocated".
  uint64_t &offset = BlockDecls[VD];


  // See if we have already allocated an offset for this variable.
  if (offset == 0) {
    // Don't run the expensive check, unless we have to.
    if (!BlockHasCopyDispose && BlockRequiresCopying(E->getType()))
      BlockHasCopyDispose = true;
    // if not, allocate one now.
    offset = getBlockOffset(E);
  }

  // Compute the raw byte address of the slot inside the block literal.
  llvm::Value *BlockLiteral = LoadBlockStruct();
  llvm::Value *V = Builder.CreateGEP(BlockLiteral,
                                     llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
                                                            offset),
                                     "block.literal");
  if (E->isByRef()) {
    const llvm::Type *PtrStructTy
      = llvm::PointerType::get(BuildByRefType(VD), 0);
    // The block literal will need a copy/destroy helper.
    BlockHasCopyDispose = true;

    // Slot holds a pointer to the byref struct; chase its forwarding
    // pointer, then index to the actual value field.
    const llvm::Type *Ty = PtrStructTy;
    Ty = llvm::PointerType::get(Ty, 0);
    V = Builder.CreateBitCast(V, Ty);
    V = Builder.CreateLoad(V, false);
    V = Builder.CreateStructGEP(V, 1, "forwarding");
    V = Builder.CreateLoad(V, false);
    V = Builder.CreateBitCast(V, PtrStructTy);
    V = Builder.CreateStructGEP(V, getByRefValueLLVMField(VD),
                                VD->getNameAsString());
  } else {
    // By-copy capture: the slot itself holds the value.
    const llvm::Type *Ty = CGM.getTypes().ConvertType(VD->getType());

    Ty = llvm::PointerType::get(Ty, 0);
    V = Builder.CreateBitCast(V, Ty);
  }
  return V;
}

/// BlockForwardSelf - Make the enclosing Objective-C method's 'self' visible
/// inside the block by mapping its decl to a captured slot (done once; later
/// calls find the existing LocalDeclMap entry and return).
void CodeGenFunction::BlockForwardSelf() {
  const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
  ImplicitParamDecl *SelfDecl = OMD->getSelfDecl();
  llvm::Value *&DMEntry = LocalDeclMap[SelfDecl];
  if (DMEntry)
    return;
  // FIXME - Eliminate BlockDeclRefExprs, clients don't need/want to care
  BlockDeclRefExpr *BDRE = new (getContext())
    BlockDeclRefExpr(SelfDecl,
                     SelfDecl->getType(), SourceLocation(), false);
  DMEntry = GetAddrOfBlockDecl(BDRE);
}

/// GetAddrOfGlobalBlock - Emit a capture-free block literal as a constant
/// global ("__block_literal_global") together with its constant descriptor,
/// and return the literal.  'n' is the name used for the invoke function.
llvm::Constant *
BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) {
  // Generate the block descriptor.
  const llvm::Type *UnsignedLongTy = Types.ConvertType(Context.UnsignedLongTy);
  const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
    getTypes().ConvertType(getContext().IntTy));

  llvm::Constant *DescriptorFields[2];

  // Reserved
  DescriptorFields[0] = llvm::Constant::getNullValue(UnsignedLongTy);

  // Block literal size. For global blocks we just use the size of the generic
  // block literal struct.
  uint64_t BlockLiteralSize =
    TheTargetData.getTypeStoreSizeInBits(getGenericBlockLiteralType()) / 8;
  DescriptorFields[1] =
    llvm::ConstantInt::get(UnsignedLongTy,BlockLiteralSize);

  llvm::Constant *DescriptorStruct =
    llvm::ConstantStruct::get(VMContext, &DescriptorFields[0], 2, false);

  llvm::GlobalVariable *Descriptor =
    new llvm::GlobalVariable(getModule(), DescriptorStruct->getType(), true,
                             llvm::GlobalVariable::InternalLinkage,
                             DescriptorStruct, "__block_descriptor_global");

  // Generate the constants for the block literal.
  llvm::Constant *LiteralFields[5];

  CodeGenFunction::BlockInfo Info(0, n);
  uint64_t subBlockSize, subBlockAlign;
  llvm::SmallVector<const Expr *, 8> subBlockDeclRefDecls;
  bool subBlockHasCopyDispose = false;
  llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
  llvm::Function *Fn
    = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, 0, LocalDeclMap,
                                                 subBlockSize,
                                                 subBlockAlign,
                                                 subBlockDeclRefDecls,
                                                 subBlockHasCopyDispose);
  // A global block must not have grown past the bare header.
  assert(subBlockSize == BlockLiteralSize
         && "no imports allowed for global block");

  // isa
  LiteralFields[0] = getNSConcreteGlobalBlock();

  // Flags
  LiteralFields[1] =
    llvm::ConstantInt::get(IntTy, BLOCK_IS_GLOBAL | BLOCK_HAS_DESCRIPTOR);

  // Reserved
  LiteralFields[2] = llvm::Constant::getNullValue(IntTy);

  // Function
  LiteralFields[3] = Fn;

  // Descriptor
  LiteralFields[4] = Descriptor;

  llvm::Constant *BlockLiteralStruct =
    llvm::ConstantStruct::get(VMContext, &LiteralFields[0], 5, false);

  llvm::GlobalVariable *BlockLiteral =
    new llvm::GlobalVariable(getModule(), BlockLiteralStruct->getType(), true,
                             llvm::GlobalVariable::InternalLinkage,
                             BlockLiteralStruct, "__block_literal_global");

  return BlockLiteral;
}

/// LoadBlockStruct - Load the block literal pointer from the invoke
/// function's implicit first parameter.
llvm::Value *CodeGenFunction::LoadBlockStruct() {
  return Builder.CreateLoad(LocalDeclMap[getBlockStructDecl()], "self");
}

/// GenerateBlockFunction - Emit the invoke function for the block BExpr.
/// On return the out-parameters carry the layout computed while emitting the
/// body: total literal Size, its Align, the list of captured decls (plus
/// padding entries), and whether a copy/dispose pair is required.  The ldm
/// map is the enclosing function's LocalDeclMap, used to forward local
/// static/extern declarations into the block's scope.
llvm::Function *
CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr,
                                       const BlockInfo& Info,
                                       const Decl *OuterFuncDecl,
                                       llvm::DenseMap<const Decl*, llvm::Value*> ldm,
                                       uint64_t &Size,
                                       uint64_t &Align,
                                       llvm::SmallVector<const Expr *, 8> &subBlockDeclRefDecls,
                                       bool &subBlockHasCopyDispose) {

  // Check if we should generate debug info for this block.
  if (CGM.getDebugInfo())
    DebugInfo = CGM.getDebugInfo();

  // Arrange for local static and local extern declarations to appear
  // to be local to this function as well, as they are directly referenced
  // in a block.
  for (llvm::DenseMap<const Decl *, llvm::Value*>::iterator i = ldm.begin();
       i != ldm.end();
       ++i) {
    // NOTE(review): dyn_cast may return null for non-VarDecl map entries,
    // which would crash on the dereference below — confirm the map only ever
    // holds VarDecls here.
    const VarDecl *VD = dyn_cast<VarDecl>(i->first);

    if (VD->getStorageClass() == VarDecl::Static || VD->hasExternalStorage())
      LocalDeclMap[VD] = i->second;
  }

  // FIXME: We need to rearrange the code for copy/dispose so we have this
  // sooner, so we can calculate offsets correctly.
  if (!BlockHasCopyDispose)
    BlockOffset = CGM.getTargetData()
      .getTypeStoreSizeInBits(CGM.getGenericBlockLiteralType()) / 8;
  else
    BlockOffset = CGM.getTargetData()
      .getTypeStoreSizeInBits(CGM.getGenericExtendedBlockLiteralType()) / 8;
  BlockAlign = getContext().getTypeAlign(getContext().VoidPtrTy) / 8;

  const FunctionType *BlockFunctionType = BExpr->getFunctionType();
  QualType ResultType;
  bool IsVariadic;
  if (const FunctionProtoType *FTy =
      dyn_cast<FunctionProtoType>(BlockFunctionType)) {
    ResultType = FTy->getResultType();
    IsVariadic = FTy->isVariadic();
  } else {
    // K&R style block.
    ResultType = BlockFunctionType->getResultType();
    IsVariadic = false;
  }

  FunctionArgList Args;

  const BlockDecl *BD = BExpr->getBlockDecl();

  // The implicit void* first parameter through which the literal is passed.
  // FIXME: This leaks
  ImplicitParamDecl *SelfDecl =
    ImplicitParamDecl::Create(getContext(), 0,
                              SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));

  Args.push_back(std::make_pair(SelfDecl, SelfDecl->getType()));
  BlockStructDecl = SelfDecl;

  for (BlockDecl::param_const_iterator i = BD->param_begin(),
       e = BD->param_end(); i != e; ++i)
    Args.push_back(std::make_pair(*i, (*i)->getType()));

  const CGFunctionInfo &FI =
    CGM.getTypes().getFunctionInfo(ResultType, Args);

  std::string Name = std::string("__") + Info.Name + "_block_invoke_";
  CodeGenTypes &Types = CGM.getTypes();
  const llvm::FunctionType *LTy = Types.GetFunctionType(FI, IsVariadic);

  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           Name,
                           &CGM.getModule());

  CGM.SetInternalFunctionAttributes(BD, Fn, FI);

  StartFunction(BD, ResultType, Fn, Args,
                BExpr->getBody()->getLocEnd());

  // Save a spot to insert the debug information for all the BlockDeclRefDecls.
  llvm::BasicBlock *entry = Builder.GetInsertBlock();
  llvm::BasicBlock::iterator entry_ptr = Builder.GetInsertPoint();

  CurFuncDecl = OuterFuncDecl;
  CurCodeDecl = BD;
  EmitStmt(BExpr->getBody());

  if (CGDebugInfo *DI = getDebugInfo()) {
    llvm::BasicBlock *end = Builder.GetInsertBlock();
    llvm::BasicBlock::iterator end_ptr = Builder.GetInsertPoint();

    // Emit debug information for all the BlockDeclRefDecls.
    // First, go back to the entry...
    Builder.SetInsertPoint(entry, entry_ptr);

    // And then insert the debug information..
    for (unsigned i=0; i < BlockDeclRefDecls.size(); ++i) {
      const Expr *E = BlockDeclRefDecls[i];
      const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E);
      // Padding entries are plain DeclRefExprs; only real captures get
      // debug declarations.
      if (BDRE) {
        const ValueDecl *D = BDRE->getDecl();
        DI->setLocation(D->getLocation());
        DI->EmitDeclareOfBlockDeclRefVariable(BDRE,
                                             LocalDeclMap[getBlockStructDecl()],
                                              Builder, this);
      }
    }

    // Then go back to the end, and we're done.
    Builder.SetInsertPoint(end, end_ptr);
  }

  FinishFunction(cast<CompoundStmt>(BExpr->getBody())->getRBracLoc());

  // The runtime needs a minimum alignment of a void *.
  uint64_t MinAlign = getContext().getTypeAlign(getContext().VoidPtrTy) / 8;
  BlockOffset = llvm::RoundUpToAlignment(BlockOffset, MinAlign);

  // Report the computed layout back to the caller.
  Size = BlockOffset;
  Align = BlockAlign;
  subBlockDeclRefDecls = BlockDeclRefDecls;
  subBlockHasCopyDispose |= BlockHasCopyDispose;
  return Fn;
}

/// getBlockOffset - Allocate a byte offset in the block literal for the
/// variable referenced by BDRE (a void* slot for __block variables, the
/// value's own size otherwise), recording any alignment padding as a
/// synthetic DeclRefExpr entry in BlockDeclRefDecls.  Returns the offset of
/// the newly allocated slot.
uint64_t BlockFunction::getBlockOffset(const BlockDeclRefExpr *BDRE) {
  const ValueDecl *D = dyn_cast<ValueDecl>(BDRE->getDecl());

  uint64_t Size = getContext().getTypeSize(D->getType()) / 8;
  uint64_t Align = getContext().getDeclAlignInBytes(D);

  if (BDRE->isByRef()) {
    // __block variables are captured as a pointer to their byref struct.
    Size = getContext().getTypeSize(getContext().VoidPtrTy) / 8;
    Align = getContext().getTypeAlign(getContext().VoidPtrTy) / 8;
  }

  assert ((Align > 0) && "alignment must be 1 byte or more");

  uint64_t OldOffset = BlockOffset;

  // Ensure proper alignment, even if it means we have to have a gap
  BlockOffset = llvm::RoundUpToAlignment(BlockOffset, Align);
  BlockAlign = std::max(Align, BlockAlign);

  uint64_t Pad = BlockOffset - OldOffset;
  if (Pad) {
    // NOTE(review): this ArrayType::get result is discarded — presumably a
    // leftover; the padding type actually used is the AST type below.
    llvm::ArrayType::get(llvm::Type::getInt8Ty(VMContext), Pad);
    // Represent the gap as a char[Pad] dummy decl so the literal struct's
    // field indices stay in sync with BlockDeclRefDecls.
    QualType PadTy = getContext().getConstantArrayType(getContext().CharTy,
                                                       llvm::APInt(32, Pad),
                                                       ArrayType::Normal, 0);
    ValueDecl *PadDecl = VarDecl::Create(getContext(), 0, SourceLocation(),
                                         0, QualType(PadTy), 0, VarDecl::None);
    Expr *E;
    E = new (getContext()) DeclRefExpr(PadDecl, PadDecl->getType(),
                                       SourceLocation(), false, false);
    BlockDeclRefDecls.push_back(E);
  }
  BlockDeclRefDecls.push_back(BDRE);

  BlockOffset += Size;
  return BlockOffset-Size;
}

/// GenerateCopyHelperFunction - Emit the block copy helper
/// ("__copy_helper_block_"): for every noted field that is byref or requires
/// copying, call _Block_object_assign(dst_field, src_field, flag).  Returns
/// the helper bitcast to i8*.
llvm::Constant *BlockFunction::
GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T,
                           std::vector<HelperInfo> *NoteForHelperp) {
  QualType R = getContext().VoidTy;

  FunctionArgList Args;
  // FIXME: This leaks
  ImplicitParamDecl *Dst =
    ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));
  Args.push_back(std::make_pair(Dst, Dst->getType()));
  ImplicitParamDecl *Src =
    ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));
  Args.push_back(std::make_pair(Src, Src->getType()));

  const CGFunctionInfo &FI =
    CGM.getTypes().getFunctionInfo(R, Args);

  // FIXME: We'd like to put these into a mergable by content, with
  // internal linkage.
  std::string Name = std::string("__copy_helper_block_");
  CodeGenTypes &Types = CGM.getTypes();
  const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);

  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           Name,
                           &CGM.getModule());

  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__copy_helper_block_");

  FunctionDecl *FD = FunctionDecl::Create(getContext(),
                                          getContext().getTranslationUnitDecl(),
                                          SourceLocation(), II, R, 0,
                                          FunctionDecl::Static, false,
                                          true);
  CGF.StartFunction(FD, R, Fn, Args, SourceLocation());

  llvm::Value *SrcObj = CGF.GetAddrOfLocalVar(Src);
  llvm::Type *PtrPtrT;

  if (NoteForHelperp) {
    std::vector<HelperInfo> &NoteForHelper = *NoteForHelperp;

    // Parameters are void*; reinterpret them as T** and load the literals.
    PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
    SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT);
    SrcObj = Builder.CreateLoad(SrcObj);

    llvm::Value *DstObj = CGF.GetAddrOfLocalVar(Dst);
    // NOTE(review): this declaration shadows the outer PtrPtrT (same type,
    // same value) — harmless but worth cleaning up.
    llvm::Type *PtrPtrT;
    PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
    DstObj = Builder.CreateBitCast(DstObj, PtrPtrT);
    DstObj = Builder.CreateLoad(DstObj);

    for (unsigned i=0; i < NoteForHelper.size(); ++i) {
      int flag = NoteForHelper[i].flag;
      int index = NoteForHelper[i].index;

      if ((NoteForHelper[i].flag & BLOCK_FIELD_IS_BYREF)
          || NoteForHelper[i].RequiresCopying) {
        // _Block_object_assign(&dst->field, src->field, flag)
        llvm::Value *Srcv = SrcObj;
        Srcv = Builder.CreateStructGEP(Srcv, index);
        Srcv = Builder.CreateBitCast(Srcv,
                                     llvm::PointerType::get(PtrToInt8Ty, 0));
        Srcv = Builder.CreateLoad(Srcv);

        llvm::Value *Dstv = Builder.CreateStructGEP(DstObj, index);
        Dstv = Builder.CreateBitCast(Dstv, PtrToInt8Ty);

        llvm::Value *N = llvm::ConstantInt::get(
              llvm::Type::getInt32Ty(T->getContext()), flag);
        llvm::Value *F = getBlockObjectAssign();
        Builder.CreateCall3(F, Dstv, Srcv, N);
      }
    }
  }


  CGF.FinishFunction();

  return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
}

/// GenerateDestroyHelperFunction - Emit the block dispose helper
/// ("__destroy_helper_block_"): for every noted field that is byref or
/// requires copying, release it via BuildBlockRelease with the recorded
/// flag.  Returns the helper bitcast to i8*.
llvm::Constant *BlockFunction::
GenerateDestroyHelperFunction(bool BlockHasCopyDispose,
                              const llvm::StructType* T,
                              std::vector<HelperInfo> *NoteForHelperp) {
  QualType R = getContext().VoidTy;

  FunctionArgList Args;
  // FIXME: This leaks
  ImplicitParamDecl *Src =
    ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));

  Args.push_back(std::make_pair(Src, Src->getType()));

  const CGFunctionInfo &FI =
    CGM.getTypes().getFunctionInfo(R, Args);

  // FIXME: We'd like to put these into a mergable by content, with
  // internal linkage.
  std::string Name = std::string("__destroy_helper_block_");
  CodeGenTypes &Types = CGM.getTypes();
  const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);

  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           Name,
                           &CGM.getModule());

  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__destroy_helper_block_");

  FunctionDecl *FD = FunctionDecl::Create(getContext(),
                                          getContext().getTranslationUnitDecl(),
                                          SourceLocation(), II, R, 0,
                                          FunctionDecl::Static, false,
                                          true);
  CGF.StartFunction(FD, R, Fn, Args, SourceLocation());

  if (NoteForHelperp) {
    std::vector<HelperInfo> &NoteForHelper = *NoteForHelperp;

    // Parameter is void*; reinterpret as T** and load the literal pointer.
    llvm::Value *SrcObj = CGF.GetAddrOfLocalVar(Src);
    llvm::Type *PtrPtrT;
    PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
    SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT);
    SrcObj = Builder.CreateLoad(SrcObj);

    for (unsigned i=0; i < NoteForHelper.size(); ++i) {
      int flag = NoteForHelper[i].flag;
      int index = NoteForHelper[i].index;

      if ((NoteForHelper[i].flag & BLOCK_FIELD_IS_BYREF)
          || NoteForHelper[i].RequiresCopying) {
        llvm::Value *Srcv = SrcObj;
        Srcv = Builder.CreateStructGEP(Srcv, index);
        Srcv = Builder.CreateBitCast(Srcv,
                                     llvm::PointerType::get(PtrToInt8Ty, 0));
        Srcv = Builder.CreateLoad(Srcv);

        BuildBlockRelease(Srcv, flag);
      }
    }
  }

  CGF.FinishFunction();

  return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
}

/// BuildCopyHelper - Emit the copy helper on a fresh CodeGenFunction,
/// threading through this block's BlockHasCopyDispose state.
llvm::Constant *BlockFunction::BuildCopyHelper(const llvm::StructType *T,
                                               std::vector<HelperInfo> *NoteForHelper) {
  return CodeGenFunction(CGM).GenerateCopyHelperFunction(BlockHasCopyDispose,
                                                         T, NoteForHelper);
}

/// BuildDestroyHelper - Emit the dispose helper on a fresh CodeGenFunction,
/// threading through this block's BlockHasCopyDispose state.
llvm::Constant *BlockFunction::BuildDestroyHelper(const llvm::StructType *T,
                                                  std::vector<HelperInfo> *NoteForHelperp) {
  return CodeGenFunction(CGM).GenerateDestroyHelperFunction(BlockHasCopyDispose,
                                                            T, NoteForHelperp);
}

/// GeneratebyrefCopyHelperFunction - Emit the byref-struct copy helper
/// ("__Block_byref_id_object_copy_"): calls
/// _Block_object_assign(&dst->x, src->x, flag | BLOCK_BYREF_CALLER), where x
/// is field 6 of the byref struct.  Returns the helper bitcast to i8*.
llvm::Constant *BlockFunction::
GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
  QualType R = getContext().VoidTy;

  FunctionArgList Args;
  // FIXME: This leaks
  ImplicitParamDecl *Dst =
    ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));
  Args.push_back(std::make_pair(Dst, Dst->getType()));

  // FIXME: This leaks
  ImplicitParamDecl *Src =
    ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));
  Args.push_back(std::make_pair(Src, Src->getType()));

  const CGFunctionInfo &FI =
    CGM.getTypes().getFunctionInfo(R, Args);

  std::string Name = std::string("__Block_byref_id_object_copy_");
  CodeGenTypes &Types = CGM.getTypes();
  const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);

  // FIXME: We'd like to put these into a mergable by content, with
  // internal linkage.
  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           Name,
                           &CGM.getModule());

  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__Block_byref_id_object_copy_");

  FunctionDecl *FD = FunctionDecl::Create(getContext(),
                                          getContext().getTranslationUnitDecl(),
                                          SourceLocation(), II, R, 0,
                                          FunctionDecl::Static, false,
                                          true);
  CGF.StartFunction(FD, R, Fn, Args, SourceLocation());

  // dst->x
  llvm::Value *V = CGF.GetAddrOfLocalVar(Dst);
  V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0));
  V = Builder.CreateLoad(V);
  V = Builder.CreateStructGEP(V, 6, "x");
  llvm::Value *DstObj = Builder.CreateBitCast(V, PtrToInt8Ty);

  // src->x
  V = CGF.GetAddrOfLocalVar(Src);
  V = Builder.CreateLoad(V);
  V = Builder.CreateBitCast(V, T);
  V = Builder.CreateStructGEP(V, 6, "x");
  V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0));
  llvm::Value *SrcObj = Builder.CreateLoad(V);

  flag |= BLOCK_BYREF_CALLER;

  llvm::Value *N = llvm::ConstantInt::get(
          llvm::Type::getInt32Ty(T->getContext()), flag);
  llvm::Value *F = getBlockObjectAssign();
  Builder.CreateCall3(F, DstObj, SrcObj, N);

  CGF.FinishFunction();

  return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
}

/// GeneratebyrefDestroyHelperFunction - Emit the byref-struct dispose helper
/// ("__Block_byref_id_object_dispose_"): releases src->x (field 6) with
/// flag | BLOCK_BYREF_CALLER.  Returns the helper bitcast to i8*.
llvm::Constant *
BlockFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
                                                  int flag) {
  QualType R = getContext().VoidTy;

  FunctionArgList Args;
  // FIXME: This leaks
  ImplicitParamDecl *Src =
    ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));

  Args.push_back(std::make_pair(Src, Src->getType()));

  const CGFunctionInfo &FI =
    CGM.getTypes().getFunctionInfo(R, Args);

  std::string Name = std::string("__Block_byref_id_object_dispose_");
  CodeGenTypes &Types = CGM.getTypes();
  const llvm::FunctionType
*LTy = Types.GetFunctionType(FI, false); 1002 1003 // FIXME: We'd like to put these into a mergable by content, with 1004 // internal linkage. 1005 llvm::Function *Fn = 1006 llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage, 1007 Name, 1008 &CGM.getModule()); 1009 1010 IdentifierInfo *II 1011 = &CGM.getContext().Idents.get("__Block_byref_id_object_dispose_"); 1012 1013 FunctionDecl *FD = FunctionDecl::Create(getContext(), 1014 getContext().getTranslationUnitDecl(), 1015 SourceLocation(), II, R, 0, 1016 FunctionDecl::Static, false, 1017 true); 1018 CGF.StartFunction(FD, R, Fn, Args, SourceLocation()); 1019 1020 llvm::Value *V = CGF.GetAddrOfLocalVar(Src); 1021 V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0)); 1022 V = Builder.CreateLoad(V); 1023 V = Builder.CreateStructGEP(V, 6, "x"); 1024 V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0)); 1025 V = Builder.CreateLoad(V); 1026 1027 flag |= BLOCK_BYREF_CALLER; 1028 BuildBlockRelease(V, flag); 1029 CGF.FinishFunction(); 1030 1031 return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty); 1032} 1033 1034llvm::Constant *BlockFunction::BuildbyrefCopyHelper(const llvm::Type *T, 1035 int flag, unsigned Align) { 1036 // All alignments below that of pointer alignment collpase down to just 1037 // pointer alignment, as we always have at least that much alignment to begin 1038 // with. 1039 Align /= unsigned(CGF.Target.getPointerAlign(0)/8); 1040 // As an optimization, we only generate a single function of each kind we 1041 // might need. We need a different one for each alignment and for each 1042 // setting of flags. We mix Align and flag to get the kind. 
1043 uint64_t kind = (uint64_t)Align*BLOCK_BYREF_CURRENT_MAX + flag; 1044 llvm::Constant *& Entry = CGM.AssignCache[kind]; 1045 if (Entry) 1046 return Entry; 1047 return Entry=CodeGenFunction(CGM).GeneratebyrefCopyHelperFunction(T, flag); 1048} 1049 1050llvm::Constant *BlockFunction::BuildbyrefDestroyHelper(const llvm::Type *T, 1051 int flag, 1052 unsigned Align) { 1053 // All alignments below that of pointer alignment collpase down to just 1054 // pointer alignment, as we always have at least that much alignment to begin 1055 // with. 1056 Align /= unsigned(CGF.Target.getPointerAlign(0)/8); 1057 // As an optimization, we only generate a single function of each kind we 1058 // might need. We need a different one for each alignment and for each 1059 // setting of flags. We mix Align and flag to get the kind. 1060 uint64_t kind = (uint64_t)Align*BLOCK_BYREF_CURRENT_MAX + flag; 1061 llvm::Constant *& Entry = CGM.DestroyCache[kind]; 1062 if (Entry) 1063 return Entry; 1064 return Entry=CodeGenFunction(CGM).GeneratebyrefDestroyHelperFunction(T, flag); 1065} 1066 1067llvm::Value *BlockFunction::getBlockObjectDispose() { 1068 if (CGM.BlockObjectDispose == 0) { 1069 const llvm::FunctionType *FTy; 1070 std::vector<const llvm::Type*> ArgTys; 1071 const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext); 1072 ArgTys.push_back(PtrToInt8Ty); 1073 ArgTys.push_back(llvm::Type::getInt32Ty(VMContext)); 1074 FTy = llvm::FunctionType::get(ResultType, ArgTys, false); 1075 CGM.BlockObjectDispose 1076 = CGM.CreateRuntimeFunction(FTy, "_Block_object_dispose"); 1077 } 1078 return CGM.BlockObjectDispose; 1079} 1080 1081llvm::Value *BlockFunction::getBlockObjectAssign() { 1082 if (CGM.BlockObjectAssign == 0) { 1083 const llvm::FunctionType *FTy; 1084 std::vector<const llvm::Type*> ArgTys; 1085 const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext); 1086 ArgTys.push_back(PtrToInt8Ty); 1087 ArgTys.push_back(PtrToInt8Ty); 1088 ArgTys.push_back(llvm::Type::getInt32Ty(VMContext)); 
1089 FTy = llvm::FunctionType::get(ResultType, ArgTys, false); 1090 CGM.BlockObjectAssign 1091 = CGM.CreateRuntimeFunction(FTy, "_Block_object_assign"); 1092 } 1093 return CGM.BlockObjectAssign; 1094} 1095 1096void BlockFunction::BuildBlockRelease(llvm::Value *V, int flag) { 1097 llvm::Value *F = getBlockObjectDispose(); 1098 llvm::Value *N; 1099 V = Builder.CreateBitCast(V, PtrToInt8Ty); 1100 N = llvm::ConstantInt::get(llvm::Type::getInt32Ty(V->getContext()), flag); 1101 Builder.CreateCall2(F, V, N); 1102} 1103 1104ASTContext &BlockFunction::getContext() const { return CGM.getContext(); } 1105 1106BlockFunction::BlockFunction(CodeGenModule &cgm, CodeGenFunction &cgf, 1107 CGBuilderTy &B) 1108 : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()), Builder(B) { 1109 PtrToInt8Ty = llvm::PointerType::getUnqual( 1110 llvm::Type::getInt8Ty(VMContext)); 1111 1112 BlockHasCopyDispose = false; 1113} 1114