//==- CGObjCRuntime.cpp - Interface to Shared Objective-C Runtime Features ==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This abstract class defines the interface for Objective-C runtime-specific
// code generation. It provides some concrete helper methods for functionality
// shared between all (or most) of the Objective-C runtimes supported by clang.
//
//===----------------------------------------------------------------------===//

#include "CGObjCRuntime.h"
#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtObjC.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/CallSite.h"

using namespace clang;
using namespace CodeGen;

static uint64_t LookupFieldBitOffset(CodeGen::CodeGenModule &CGM,
                                     const ObjCInterfaceDecl *OID,
                                     const ObjCImplementationDecl *ID,
                                     const ObjCIvarDecl *Ivar) {
  const ObjCInterfaceDecl *Container = Ivar->getContainingInterface();

  // FIXME: We should eliminate the need to have ObjCImplementationDecl passed
  // in here; it should never be necessary because that should be the lexical
  // decl context for the ivar.

  // If we have an implementation (and the ivar is in it), look the offset up
  // in the implementation layout.
  const ASTRecordLayout *RL;
  if (ID && declaresSameEntity(ID->getClassInterface(), Container))
    RL = &CGM.getContext().getASTObjCImplementationLayout(ID);
  else
    RL = &CGM.getContext().getASTObjCInterfaceLayout(Container);

  // Compute the field index.
  //
  // FIXME: The index here is closely tied to how ASTContext::getObjCLayout is
  // implemented. This should be fixed to get the information from the layout
  // directly.
  unsigned Index = 0;

  for (const ObjCIvarDecl *IVD = Container->all_declared_ivar_begin();
       IVD; IVD = IVD->getNextIvar()) {
    if (Ivar == IVD)
      break;
    ++Index;
  }
  assert(Index < RL->getFieldCount() && "Ivar is not inside record layout!");

  return RL->getFieldOffset(Index);
}

uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
                                              const ObjCInterfaceDecl *OID,
                                              const ObjCIvarDecl *Ivar) {
  return LookupFieldBitOffset(CGM, OID, nullptr, Ivar) /
         CGM.getContext().getCharWidth();
}

uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
                                              const ObjCImplementationDecl *OID,
                                              const ObjCIvarDecl *Ivar) {
  return LookupFieldBitOffset(CGM, OID->getClassInterface(), OID, Ivar) /
         CGM.getContext().getCharWidth();
}

unsigned CGObjCRuntime::ComputeBitfieldBitOffset(
    CodeGen::CodeGenModule &CGM,
    const ObjCInterfaceDecl *ID,
    const ObjCIvarDecl *Ivar) {
  return LookupFieldBitOffset(CGM, ID, ID->getImplementation(), Ivar);
}

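/// Build an lvalue for the given ivar from a pointer to the containing object
/// and the ivar's byte offset within it. Ordinary ivars get a natural lvalue
/// at the computed address; bit-field ivars get a freshly allocated
/// CGBitFieldInfo so the normal bit-field access machinery can be reused.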
LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
                                               const ObjCInterfaceDecl *OID,
                                               llvm::Value *BaseValue,
                                               const ObjCIvarDecl *Ivar,
                                               unsigned CVRQualifiers,
                                               llvm::Value *Offset) {
  // Compute (type*) ( (char *) BaseValue + Offset)
  QualType IvarTy = Ivar->getType();
  llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
  llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, CGF.Int8PtrTy);
  V = CGF.Builder.CreateInBoundsGEP(V, Offset, "add.ptr");

  if (!Ivar->isBitField()) {
    V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy));
    LValue LV = CGF.MakeNaturalAlignAddrLValue(V, IvarTy);
    LV.getQuals().addCVRQualifiers(CVRQualifiers);
    return LV;
  }

  // We need to compute an access strategy for this bit-field. We are given the
  // offset to the first byte in the bit-field; the sub-byte offset is taken
  // from the original layout. We reuse the normal bit-field access strategy by
  // treating this as an access to a struct where the bit-field is in byte 0,
  // and adjust the containing type size as appropriate.
  //
  // FIXME: Note that currently we make a very conservative estimate of the
  // alignment of the bit-field, because (a) it is not clear what guarantees
  // the runtime makes to us, and (b) we don't have a way to specify that the
  // struct is at an alignment plus offset.
  //
  // Note, there is a subtle invariant here: we can only call this routine on
  // non-synthesized ivars but we may be called for synthesized ivars. However,
  // a synthesized ivar can never be a bit-field, so this is safe.
  uint64_t FieldBitOffset = LookupFieldBitOffset(CGF.CGM, OID, nullptr, Ivar);
  uint64_t BitOffset = FieldBitOffset % CGF.CGM.getContext().getCharWidth();
  uint64_t AlignmentBits = CGF.CGM.getTarget().getCharAlign();
  uint64_t BitFieldSize = Ivar->getBitWidthValue(CGF.getContext());
  CharUnits StorageSize = CGF.CGM.getContext().toCharUnitsFromBits(
      llvm::RoundUpToAlignment(BitOffset + BitFieldSize, AlignmentBits));
  CharUnits Alignment = CGF.CGM.getContext().toCharUnitsFromBits(AlignmentBits);

  // Allocate a new CGBitFieldInfo object to describe this access.
  //
  // FIXME: This is incredibly wasteful; these should be uniqued or part of
  // some layout object. However, this is blocked on other cleanups to the
  // Objective-C code, so for now we just live with allocating a bunch of these
  // objects.
  CGBitFieldInfo *Info = new (CGF.CGM.getContext()) CGBitFieldInfo(
    CGBitFieldInfo::MakeInfo(CGF.CGM.getTypes(), Ivar, BitOffset, BitFieldSize,
                             CGF.CGM.getContext().toBits(StorageSize),
                             Alignment.getQuantity()));

  V = CGF.Builder.CreateBitCast(V,
                                llvm::Type::getIntNPtrTy(CGF.getLLVMContext(),
                                                         Info->StorageSize));
  return LValue::MakeBitfield(V, *Info,
                              IvarTy.withCVRQualifiers(CVRQualifiers),
                              Alignment);
}

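// Helpers shared by EmitTryCatchStmt below: per-@catch-clause bookkeeping and
// a cleanup that calls the runtime's end-catch function when a handler is
// exited, either normally or by a thrown exception.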
namespace {
  struct CatchHandler {
    const VarDecl *Variable;
    const Stmt *Body;
    llvm::BasicBlock *Block;
    llvm::Constant *TypeInfo;
  };

  struct CallObjCEndCatch : EHScopeStack::Cleanup {
    CallObjCEndCatch(bool MightThrow, llvm::Value *Fn)
      : MightThrow(MightThrow), Fn(Fn) {}
    bool MightThrow;
    llvm::Value *Fn;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      if (!MightThrow) {
        CGF.Builder.CreateCall(Fn)->setDoesNotThrow();
        return;
      }

      CGF.EmitRuntimeCallOrInvoke(Fn);
    }
  };
}

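/// Emit an Objective-C @try/@catch/@finally statement in terms of the given
/// runtime entry points. beginCatchFn and endCatchFn may be null if the
/// runtime does not use explicit catch-enter and catch-exit calls.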
void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
                                     const ObjCAtTryStmt &S,
                                     llvm::Constant *beginCatchFn,
                                     llvm::Constant *endCatchFn,
                                     llvm::Constant *exceptionRethrowFn) {
  // Jump destination for falling out of catch bodies.
  CodeGenFunction::JumpDest Cont;
  if (S.getNumCatchStmts())
    Cont = CGF.getJumpDestInCurrentScope("eh.cont");

  CodeGenFunction::FinallyInfo FinallyInfo;
  if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt())
    FinallyInfo.enter(CGF, Finally->getFinallyBody(),
                      beginCatchFn, endCatchFn, exceptionRethrowFn);

  SmallVector<CatchHandler, 8> Handlers;

  // Enter the catch, if there is one.
  if (S.getNumCatchStmts()) {
    for (unsigned I = 0, N = S.getNumCatchStmts(); I != N; ++I) {
      const ObjCAtCatchStmt *CatchStmt = S.getCatchStmt(I);
      const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl();

      Handlers.push_back(CatchHandler());
      CatchHandler &Handler = Handlers.back();
      Handler.Variable = CatchDecl;
      Handler.Body = CatchStmt->getCatchBody();
      Handler.Block = CGF.createBasicBlock("catch");

      // @catch(...) always matches.
      if (!CatchDecl) {
        Handler.TypeInfo = nullptr; // catch-all
        // Don't consider any other catches.
        break;
      }

      Handler.TypeInfo = GetEHType(CatchDecl->getType());
    }

    EHCatchScope *Catch = CGF.EHStack.pushCatch(Handlers.size());
    for (unsigned I = 0, E = Handlers.size(); I != E; ++I)
      Catch->setHandler(I, Handlers[I].TypeInfo, Handlers[I].Block);
  }

  // Emit the try body.
  CGF.EmitStmt(S.getTryBody());

  // Leave the try.
  if (S.getNumCatchStmts())
    CGF.popCatchScope();

  // Remember where we were.
  CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();

  // Emit the handlers.
  for (unsigned I = 0, E = Handlers.size(); I != E; ++I) {
    CatchHandler &Handler = Handlers[I];

    CGF.EmitBlock(Handler.Block);
    llvm::Value *RawExn = CGF.getExceptionFromSlot();

    // Enter the catch.
    llvm::Value *Exn = RawExn;
    if (beginCatchFn) {
      Exn = CGF.Builder.CreateCall(beginCatchFn, RawExn, "exn.adjusted");
      cast<llvm::CallInst>(Exn)->setDoesNotThrow();
    }

    CodeGenFunction::LexicalScope cleanups(CGF, Handler.Body->getSourceRange());

    if (endCatchFn) {
      // Add a cleanup to leave the catch.
      bool EndCatchMightThrow = (Handler.Variable == nullptr);

      CGF.EHStack.pushCleanup<CallObjCEndCatch>(NormalAndEHCleanup,
                                                EndCatchMightThrow,
                                                endCatchFn);
    }

    // Bind the catch parameter if it exists.
    if (const VarDecl *CatchParam = Handler.Variable) {
      llvm::Type *CatchType = CGF.ConvertType(CatchParam->getType());
      llvm::Value *CastExn = CGF.Builder.CreateBitCast(Exn, CatchType);

      CGF.EmitAutoVarDecl(*CatchParam);

      llvm::Value *CatchParamAddr = CGF.GetAddrOfLocalVar(CatchParam);

      switch (CatchParam->getType().getQualifiers().getObjCLifetime()) {
      case Qualifiers::OCL_Strong:
        CastExn = CGF.EmitARCRetainNonBlock(CastExn);
        // fallthrough

      case Qualifiers::OCL_None:
      case Qualifiers::OCL_ExplicitNone:
      case Qualifiers::OCL_Autoreleasing:
        CGF.Builder.CreateStore(CastExn, CatchParamAddr);
        break;

      case Qualifiers::OCL_Weak:
        CGF.EmitARCInitWeak(CatchParamAddr, CastExn);
        break;
      }
    }

    CGF.ObjCEHValueStack.push_back(Exn);
    CGF.EmitStmt(Handler.Body);
    CGF.ObjCEHValueStack.pop_back();

    // Leave any cleanups associated with the catch.
    cleanups.ForceCleanup();

    CGF.EmitBranchThroughCleanup(Cont);
  }

  // Go back to the try-statement fallthrough.
  CGF.Builder.restoreIP(SavedIP);

  // Pop out of the finally.
  if (S.getFinallyStmt())
    FinallyInfo.exit(CGF);

  if (Cont.isValid())
    CGF.EmitBlock(Cont.getBlock());
}

namespace {
  struct CallSyncExit : EHScopeStack::Cleanup {
    llvm::Value *SyncExitFn;
    llvm::Value *SyncArg;
    CallSyncExit(llvm::Value *SyncExitFn, llvm::Value *SyncArg)
      : SyncExitFn(SyncExitFn), SyncArg(SyncArg) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.Builder.CreateCall(SyncExitFn, SyncArg)->setDoesNotThrow();
    }
  };
}

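/// Emit an Objective-C @synchronized statement: evaluate the lock operand
/// (retaining it under ARC), call syncEnterFn on it, and register a cleanup
/// that calls syncExitFn on every normal and exceptional path out of the body.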
343/// 344/// \param method - may be null 345/// \param resultType - the result type to use if there's no method 346/// \param callArgs - the actual arguments, including implicit ones 347CGObjCRuntime::MessageSendInfo 348CGObjCRuntime::getMessageSendInfo(const ObjCMethodDecl *method, 349 QualType resultType, 350 CallArgList &callArgs) { 351 // If there's a method, use information from that. 352 if (method) { 353 const CGFunctionInfo &signature = 354 CGM.getTypes().arrangeObjCMessageSendSignature(method, callArgs[0].Ty); 355 356 llvm::PointerType *signatureType = 357 CGM.getTypes().GetFunctionType(signature)->getPointerTo(); 358 359 // If that's not variadic, there's no need to recompute the ABI 360 // arrangement. 361 if (!signature.isVariadic()) 362 return MessageSendInfo(signature, signatureType); 363 364 // Otherwise, there is. 365 FunctionType::ExtInfo einfo = signature.getExtInfo(); 366 const CGFunctionInfo &argsInfo = 367 CGM.getTypes().arrangeFreeFunctionCall(resultType, callArgs, einfo, 368 signature.getRequiredArgs()); 369 370 return MessageSendInfo(argsInfo, signatureType); 371 } 372 373 // There's no method; just use a default CC. 374 const CGFunctionInfo &argsInfo = 375 CGM.getTypes().arrangeFreeFunctionCall(resultType, callArgs, 376 FunctionType::ExtInfo(), 377 RequiredArgs::All); 378 379 // Derive the signature to call from that. 380 llvm::PointerType *signatureType = 381 CGM.getTypes().GetFunctionType(argsInfo)->getPointerTo(); 382 return MessageSendInfo(argsInfo, signatureType); 383} 384