CGObjCRuntime.cpp revision 5f9e272e632e951b1efe824cd16acb4d96077930
//==- CGObjCRuntime.cpp - Interface to Shared Objective-C Runtime Features ==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This abstract class defines the interface for Objective-C runtime-specific
// code generation. It provides some concrete helper methods for functionality
// shared between all (or most) of the Objective-C runtimes supported by clang.
//
//===----------------------------------------------------------------------===//

#include "CGObjCRuntime.h"

#include "CGRecordLayout.h"
#include "CodeGenModule.h"
#include "CodeGenFunction.h"
#include "CGCleanup.h"

#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtObjC.h"

#include "llvm/Support/CallSite.h"

using namespace clang;
using namespace CodeGen;

/// Look up the offset, in bits, of \p Ivar within the record layout of its
/// containing interface (or of the implementation \p ID, when \p ID is given
/// and matches the ivar's containing interface).  The offset comes from
/// ASTRecordLayout::getFieldOffset, indexed by the ivar's position in the
/// container's declared-ivar chain.
static uint64_t LookupFieldBitOffset(CodeGen::CodeGenModule &CGM,
                                     const ObjCInterfaceDecl *OID,
                                     const ObjCImplementationDecl *ID,
                                     const ObjCIvarDecl *Ivar) {
  const ObjCInterfaceDecl *Container = Ivar->getContainingInterface();

  // FIXME: We should eliminate the need to have ObjCImplementationDecl passed
  // in here; it should never be necessary because that should be the lexical
  // decl context for the ivar.

  // If we know we have an implementation (and the ivar is in it) then
  // look up in the implementation layout.
  const ASTRecordLayout *RL;
  if (ID && ID->getClassInterface() == Container)
    RL = &CGM.getContext().getASTObjCImplementationLayout(ID);
  else
    RL = &CGM.getContext().getASTObjCInterfaceLayout(Container);

  // Compute field index.
  //
  // FIXME: The index here is closely tied to how ASTContext::getObjCLayout is
  // implemented. This should be fixed to get the information from the layout
  // directly.
  unsigned Index = 0;

  // Walk the container's declared ivars in order; the ivar's position in this
  // chain is its field index in the record layout.
  for (const ObjCIvarDecl *IVD = Container->all_declared_ivar_begin();
       IVD; IVD = IVD->getNextIvar()) {
    if (Ivar == IVD)
      break;
    ++Index;
  }
  assert(Index < RL->getFieldCount() && "Ivar is not inside record layout!");

  return RL->getFieldOffset(Index);
}

/// Compute the byte offset of \p Ivar from the interface layout of \p OID
/// (bit offset divided by the target's char width).
uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
                                              const ObjCInterfaceDecl *OID,
                                              const ObjCIvarDecl *Ivar) {
  return LookupFieldBitOffset(CGM, OID, 0, Ivar) /
    CGM.getContext().getCharWidth();
}

/// Compute the byte offset of \p Ivar, preferring the implementation layout
/// of \p OID when the ivar belongs to it (see LookupFieldBitOffset).
uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
                                              const ObjCImplementationDecl *OID,
                                              const ObjCIvarDecl *Ivar) {
  return LookupFieldBitOffset(CGM, OID->getClassInterface(), OID, Ivar) /
    CGM.getContext().getCharWidth();
}

/// Build an LValue for the ivar located \p Offset bytes past \p BaseValue.
/// For ordinary ivars this is a simple addressed lvalue; for bit-field ivars
/// a CGBitFieldInfo access strategy is synthesized (see comments below).
LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
                                               const ObjCInterfaceDecl *OID,
                                               llvm::Value *BaseValue,
                                               const ObjCIvarDecl *Ivar,
                                               unsigned CVRQualifiers,
                                               llvm::Value *Offset) {
  // Compute (type*) ( (char *) BaseValue + Offset)
  llvm::Type *I8Ptr = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
  QualType IvarTy = Ivar->getType();
  llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
  llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, I8Ptr);
  V = CGF.Builder.CreateInBoundsGEP(V, Offset, "add.ptr");
  V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy));

  // Non-bit-field ivars are plain memory lvalues with the caller's
  // CVR qualifiers merged in.
  if (!Ivar->isBitField()) {
    LValue LV = CGF.MakeAddrLValue(V, IvarTy);
    LV.getQuals().addCVRQualifiers(CVRQualifiers);
    return LV;
  }

  // We need to compute an access strategy for this bit-field. We are given the
  // offset to the first byte in the bit-field, the sub-byte offset is taken
  // from the original layout. We reuse the normal bit-field access strategy by
  // treating this as an access to a struct where the bit-field is in byte 0,
  // and adjust the containing type size as appropriate.
  //
  // FIXME: Note that currently we make a very conservative estimate of the
  // alignment of the bit-field, because (a) it is not clear what guarantees the
  // runtime makes us, and (b) we don't have a way to specify that the struct is
  // at an alignment plus offset.
  //
  // Note, there is a subtle invariant here: we can only call this routine on
  // non-synthesized ivars but we may be called for synthesized ivars. However,
  // a synthesized ivar can never be a bit-field, so this is safe.
  const ASTRecordLayout &RL =
    CGF.CGM.getContext().getASTObjCInterfaceLayout(OID);
  uint64_t TypeSizeInBits = CGF.CGM.getContext().toBits(RL.getSize());
  uint64_t FieldBitOffset = LookupFieldBitOffset(CGF.CGM, OID, 0, Ivar);
  // Sub-byte offset: position of the bit-field within its first byte.
  uint64_t BitOffset = FieldBitOffset % CGF.CGM.getContext().getCharWidth();
  uint64_t ContainingTypeAlign = CGF.CGM.getContext().Target.getCharAlign();
  // Size of the "struct" as seen from the bit-field's byte onward.
  uint64_t ContainingTypeSize = TypeSizeInBits - (FieldBitOffset - BitOffset);
  uint64_t BitFieldSize =
    Ivar->getBitWidth()->EvaluateAsInt(CGF.getContext()).getZExtValue();

  // Allocate a new CGBitFieldInfo object to describe this access.
  //
  // FIXME: This is incredibly wasteful, these should be uniqued or part of some
  // layout object. However, this is blocked on other cleanups to the
  // Objective-C code, so for now we just live with allocating a bunch of these
  // objects.
  CGBitFieldInfo *Info = new (CGF.CGM.getContext()) CGBitFieldInfo(
    CGBitFieldInfo::MakeInfo(CGF.CGM.getTypes(), Ivar, BitOffset, BitFieldSize,
                             ContainingTypeSize, ContainingTypeAlign));

  return LValue::MakeBitfield(V, *Info,
                              IvarTy.withCVRQualifiers(CVRQualifiers));
}

namespace {
  // Bookkeeping for one @catch clause while emitting a @try statement.
  struct CatchHandler {
    const VarDecl *Variable;     // catch parameter decl, or null for @catch(...)
    const Stmt *Body;            // the catch body statement
    llvm::BasicBlock *Block;     // landing block for this handler
    llvm::Value *TypeInfo;       // EH type info; null means catch-all
  };

  // Cleanup that calls the runtime's end-catch function when leaving a
  // @catch block, on both normal and exceptional paths.
  struct CallObjCEndCatch : EHScopeStack::Cleanup {
    CallObjCEndCatch(bool MightThrow, llvm::Value *Fn) :
      MightThrow(MightThrow), Fn(Fn) {}
    bool MightThrow;
    llvm::Value *Fn;

    void Emit(CodeGenFunction &CGF, Flags flags) {
      // When the end-catch cannot throw, emit a plain call marked nounwind;
      // otherwise emit a call-or-invoke so it participates in EH.
      if (!MightThrow) {
        CGF.Builder.CreateCall(Fn)->setDoesNotThrow();
        return;
      }

      CGF.EmitCallOrInvoke(Fn);
    }
  };
}

/// Shared emission of an Objective-C @try/@catch/@finally statement,
/// parameterized on the runtime's begin-catch, end-catch, and rethrow
/// functions (any of which may be null, depending on the runtime).
void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
                                     const ObjCAtTryStmt &S,
                                     llvm::Constant *beginCatchFn,
                                     llvm::Constant *endCatchFn,
                                     llvm::Constant *exceptionRethrowFn) {
  // Jump destination for falling out of catch bodies.
  CodeGenFunction::JumpDest Cont;
  if (S.getNumCatchStmts())
    Cont = CGF.getJumpDestInCurrentScope("eh.cont");

  // Enter the @finally scope first so it encloses the catches and try body.
  CodeGenFunction::FinallyInfo FinallyInfo;
  if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt())
    FinallyInfo.enter(CGF, Finally->getFinallyBody(),
                      beginCatchFn, endCatchFn, exceptionRethrowFn);

  SmallVector<CatchHandler, 8> Handlers;

  // Enter the catch, if there is one.
  if (S.getNumCatchStmts()) {
    for (unsigned I = 0, N = S.getNumCatchStmts(); I != N; ++I) {
      const ObjCAtCatchStmt *CatchStmt = S.getCatchStmt(I);
      const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl();

      Handlers.push_back(CatchHandler());
      CatchHandler &Handler = Handlers.back();
      Handler.Variable = CatchDecl;
      Handler.Body = CatchStmt->getCatchBody();
      Handler.Block = CGF.createBasicBlock("catch");

      // @catch(...) always matches.
      if (!CatchDecl) {
        Handler.TypeInfo = 0; // catch-all
        // Don't consider any other catches.
        break;
      }

      Handler.TypeInfo = GetEHType(CatchDecl->getType());
    }

    EHCatchScope *Catch = CGF.EHStack.pushCatch(Handlers.size());
    for (unsigned I = 0, E = Handlers.size(); I != E; ++I)
      Catch->setHandler(I, Handlers[I].TypeInfo, Handlers[I].Block);
  }

  // Emit the try body.
  CGF.EmitStmt(S.getTryBody());

  // Leave the try.
  if (S.getNumCatchStmts())
    CGF.EHStack.popCatch();

  // Remember where we were.
  CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();

  // Emit the handlers.
  for (unsigned I = 0, E = Handlers.size(); I != E; ++I) {
    CatchHandler &Handler = Handlers[I];

    CGF.EmitBlock(Handler.Block);
    llvm::Value *RawExn = CGF.Builder.CreateLoad(CGF.getExceptionSlot());

    // Enter the catch.
    llvm::Value *Exn = RawExn;
    if (beginCatchFn) {
      Exn = CGF.Builder.CreateCall(beginCatchFn, RawExn, "exn.adjusted");
      cast<llvm::CallInst>(Exn)->setDoesNotThrow();
    }

    CodeGenFunction::RunCleanupsScope cleanups(CGF);

    if (endCatchFn) {
      // Add a cleanup to leave the catch.
      // NOTE(review): end-catch is treated as potentially throwing only for
      // catch-alls (no catch variable) — presumably because a catch-all may
      // hold a foreign exception; confirm against the runtime's contract.
      bool EndCatchMightThrow = (Handler.Variable == 0);

      CGF.EHStack.pushCleanup<CallObjCEndCatch>(NormalAndEHCleanup,
                                                EndCatchMightThrow,
                                                endCatchFn);
    }

    // Bind the catch parameter if it exists.
    if (const VarDecl *CatchParam = Handler.Variable) {
      llvm::Type *CatchType = CGF.ConvertType(CatchParam->getType());
      llvm::Value *CastExn = CGF.Builder.CreateBitCast(Exn, CatchType);

      CGF.EmitAutoVarDecl(*CatchParam);
      CGF.Builder.CreateStore(CastExn, CGF.GetAddrOfLocalVar(CatchParam));
    }

    // Make the exception value visible to nested @throw-with-no-operand.
    CGF.ObjCEHValueStack.push_back(Exn);
    CGF.EmitStmt(Handler.Body);
    CGF.ObjCEHValueStack.pop_back();

    // Leave any cleanups associated with the catch.
    cleanups.ForceCleanup();

    CGF.EmitBranchThroughCleanup(Cont);
  }

  // Go back to the try-statement fallthrough.
  CGF.Builder.restoreIP(SavedIP);

  // Pop out of the finally.
  if (S.getFinallyStmt())
    FinallyInfo.exit(CGF);

  if (Cont.isValid())
    CGF.EmitBlock(Cont.getBlock());
}

namespace {
  // Cleanup that releases the @synchronized lock by calling the runtime's
  // sync-exit function with the saved lock argument.
  struct CallSyncExit : EHScopeStack::Cleanup {
    llvm::Value *SyncExitFn;
    llvm::Value *SyncArg;
    CallSyncExit(llvm::Value *SyncExitFn, llvm::Value *SyncArg)
      : SyncExitFn(SyncExitFn), SyncArg(SyncArg) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      CGF.Builder.CreateCall(SyncExitFn, SyncArg)->setDoesNotThrow();
    }
  };
}

/// Shared emission of an Objective-C @synchronized statement, parameterized
/// on the runtime's lock-acquire and lock-release functions.  The release is
/// registered as a normal-and-EH cleanup so it runs on every exit path.
void CGObjCRuntime::EmitAtSynchronizedStmt(CodeGenFunction &CGF,
                                           const ObjCAtSynchronizedStmt &S,
                                           llvm::Function *syncEnterFn,
                                           llvm::Function *syncExitFn) {
  // Evaluate the lock operand. This should dominate the cleanup.
  llvm::Value *SyncArg =
    CGF.EmitScalarExpr(S.getSynchExpr());

  // Acquire the lock.
  SyncArg = CGF.Builder.CreateBitCast(SyncArg, syncEnterFn->getFunctionType()->getParamType(0));
  CGF.Builder.CreateCall(syncEnterFn, SyncArg);

  // Register an all-paths cleanup to release the lock.
  CGF.EHStack.pushCleanup<CallSyncExit>(NormalAndEHCleanup, syncExitFn,
                                        SyncArg);

  // Emit the body of the statement.
  CGF.EmitStmt(S.getSynchBody());

  // Pop the lock-release cleanup.
  CGF.PopCleanupBlock();
}