CGExpr.cpp revision 8e6404ca28d6bbb76e97ea2a53a74816c2a74665
//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGCall.h"
#include "CGRecordLayout.h"
#include "CGObjCRuntime.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "llvm/Intrinsics.h"
#include "clang/CodeGen/CodeGenOptions.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

/// CreateTempAlloca - This creates a alloca and inserts it into the entry
/// block.
/// The alloca is placed at AllocaInsertPt so it dominates all uses in the
/// function; the name is dropped entirely when the builder is not
/// preserving names, to avoid paying for string construction.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(const llvm::Type *Ty,
                                                    const llvm::Twine &Name) {
  if (!Builder.isNamePreserving())
    return new llvm::AllocaInst(Ty, 0, "", AllocaInsertPt);
  return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
}

/// InitTempAlloca - Store \p Init into \p Var, placing the store immediately
/// after the alloca insertion point so the initialization happens in the
/// entry block and dominates every later use of the temporary.
void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var,
                                     llvm::Value *Init) {
  llvm::StoreInst *Store = new llvm::StoreInst(Init, Var);
  llvm::BasicBlock *Block = AllocaInsertPt->getParent();
  Block->getInstList().insertAfter(&*AllocaInsertPt, Store);
}

/// CreateIRTemp - Create a temporary alloca using the type that values of
/// \p Ty have in LLVM registers (ConvertType), aligned for \p Ty.
llvm::Value *CodeGenFunction::CreateIRTemp(QualType Ty,
                                           const llvm::Twine &Name) {
  llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
  // FIXME: Should we prefer the preferred type alignment here?
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  Alloc->setAlignment(Align.getQuantity());
  return Alloc;
}

/// CreateMemTemp - Create a temporary alloca using the in-memory
/// representation of \p Ty (ConvertTypeForMem), aligned for \p Ty. This can
/// differ from CreateIRTemp e.g. for bool, which is i1 in registers but a
/// wider integer in memory.
llvm::Value *CodeGenFunction::CreateMemTemp(QualType Ty,
                                            const llvm::Twine &Name) {
  llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
  // FIXME: Should we prefer the preferred type alignment here?
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  Alloc->setAlignment(Align.getQuantity());
  return Alloc;
}

/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  QualType BoolTy = getContext().BoolTy;
  if (E->getType()->isMemberFunctionPointerType()) {
    // Member function pointers are lowered as an aggregate; the truth value
    // is determined by the function-pointer word (struct field 0) alone.
    LValue LV = EmitAggExprToLValue(E);

    // Get the pointer.
    llvm::Value *FuncPtr = Builder.CreateStructGEP(LV.getAddress(), 0,
                                                   "src.ptr");
    FuncPtr = Builder.CreateLoad(FuncPtr);

    llvm::Value *IsNotNull =
      Builder.CreateICmpNE(FuncPtr,
                           llvm::Constant::getNullValue(FuncPtr->getType()),
                           "tobool");

    return IsNotNull;
  }
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),BoolTy);
}

/// EmitAnyExpr - Emit code to compute the specified expression which can have
/// any type. The result is returned as an RValue struct. If this is an
/// aggregate expression, the aggloc/agglocvolatile arguments indicate where the
/// result should be returned.
RValue CodeGenFunction::EmitAnyExpr(const Expr *E, llvm::Value *AggLoc,
                                    bool IsAggLocVolatile, bool IgnoreResult,
                                    bool IsInitializer) {
  if (!hasAggregateLLVMType(E->getType()))
    return RValue::get(EmitScalarExpr(E, IgnoreResult));
  else if (E->getType()->isAnyComplexType())
    return RValue::getComplex(EmitComplexExpr(E, false, false,
                                              IgnoreResult, IgnoreResult));

  // Aggregate: evaluate into AggLoc (which may be null if the caller is
  // ignoring the result).
  EmitAggExpr(E, AggLoc, IsAggLocVolatile, IgnoreResult, IsInitializer);
  return RValue::getAggregate(AggLoc, IsAggLocVolatile);
}

/// EmitAnyExprToTemp - Similary to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E,
                                          bool IsAggLocVolatile,
                                          bool IsInitializer) {
  llvm::Value *AggLoc = 0;

  // Only non-complex aggregates need a memory slot; scalars and complex
  // values are returned in (virtual) registers by EmitAnyExpr.
  if (hasAggregateLLVMType(E->getType()) &&
      !E->getType()->isAnyComplexType())
    AggLoc = CreateMemTemp(E->getType(), "agg.tmp");
  return EmitAnyExpr(E, AggLoc, IsAggLocVolatile, /*IgnoreResult=*/false,
                     IsInitializer);
}

/// EmitAnyExprToMem - Evaluate an expression into a given memory
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
                                       llvm::Value *Location,
                                       bool IsLocationVolatile,
                                       bool IsInit) {
  // Dispatch on the expression's type class: complex and aggregate values
  // have dedicated emitters that write directly into Location; anything else
  // is emitted as a scalar and stored through an lvalue for Location.
  if (E->getType()->isComplexType())
    EmitComplexExprIntoAddr(E, Location, IsLocationVolatile);
  else if (hasAggregateLLVMType(E->getType()))
    EmitAggExpr(E, Location, IsLocationVolatile, /*Ignore*/ false, IsInit);
  else {
    RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
    LValue LV = LValue::MakeAddr(Location, MakeQualifiers(E->getType()));
    EmitStoreThroughLValue(RV, LV, E->getType());
  }
}

/// EmitReferenceBindingToExpr - Emit \p E for the purpose of binding a
/// reference to its result, returning (as a scalar RValue) the address the
/// reference should bind to. Non-lvalue operands are materialized into a
/// temporary; C++ temporaries created while evaluating E are popped before
/// returning, and (for initializers) a destructor cleanup is registered for
/// non-trivially-destructible class temporaries.
RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
                                                   bool IsInitializer) {
  bool ShouldDestroyTemporaries = false;
  unsigned OldNumLiveTemporaries = 0;

  // Look through default-argument wrappers; the interesting expression is
  // the wrapped one.
  if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
    E = DAE->getExpr();

  if (const CXXExprWithTemporaries *TE = dyn_cast<CXXExprWithTemporaries>(E)) {
    ShouldDestroyTemporaries = true;

    // Keep track of the current cleanup stack depth.
    OldNumLiveTemporaries = LiveTemporaries.size();

    E = TE->getSubExpr();
  }

  RValue Val;
  if (E->isLvalue(getContext()) == Expr::LV_Valid) {
    // Emit the expr as an lvalue.
    LValue LV = EmitLValue(E);
    if (LV.isSimple()) {
      if (ShouldDestroyTemporaries) {
        // Pop temporaries.
        while (LiveTemporaries.size() > OldNumLiveTemporaries)
          PopCXXTemporary();
      }

      // A simple lvalue already is an address; bind directly to it.
      return RValue::get(LV.getAddress());
    }

    // Non-simple lvalue (bitfield, vector element, ...): load the value and
    // fall through to materialize it into a temporary below.
    Val = EmitLoadOfLValue(LV, E->getType());

    if (ShouldDestroyTemporaries) {
      // Pop temporaries.
      while (LiveTemporaries.size() > OldNumLiveTemporaries)
        PopCXXTemporary();
    }
  } else {
    const CXXBaseSpecifierArray *BasePath = 0;
    const CXXRecordDecl *DerivedClassDecl = 0;

    // If the rvalue is a derived-to-base cast, evaluate the derived object
    // and remember the path so the cast can be applied to the temporary's
    // address afterwards.
    if (const CastExpr *CE =
          dyn_cast<CastExpr>(E->IgnoreParenNoopCasts(getContext()))) {
      if (CE->getCastKind() == CastExpr::CK_DerivedToBase) {
        E = CE->getSubExpr();

        BasePath = &CE->getBasePath();
        DerivedClassDecl =
          cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl());
      }
    }

    Val = EmitAnyExprToTemp(E, /*IsAggLocVolatile=*/false,
                            IsInitializer);

    if (ShouldDestroyTemporaries) {
      // Pop temporaries.
      while (LiveTemporaries.size() > OldNumLiveTemporaries)
        PopCXXTemporary();
    }

    if (IsInitializer) {
      // We might have to destroy the temporary variable.
      if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
        if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
          if (!ClassDecl->hasTrivialDestructor()) {
            const CXXDestructorDecl *Dtor =
              ClassDecl->getDestructor(getContext());

            {
              // Normal-path cleanup: run the destructor when the enclosing
              // scope ends.
              DelayedCleanupBlock Scope(*this);
              EmitCXXDestructorCall(Dtor, Dtor_Complete,
                                    /*ForVirtualBase=*/false,
                                    Val.getAggregateAddr());

              // Make sure to jump to the exit block.
              EmitBranch(Scope.getCleanupExitBlock());
            }
            if (Exceptions) {
              // Exceptional-path cleanup mirrors the normal one.
              EHCleanupBlock Cleanup(*this);
              EmitCXXDestructorCall(Dtor, Dtor_Complete,
                                    /*ForVirtualBase=*/false,
                                    Val.getAggregateAddr());
            }
          }
        }
      }
    }

    // Check if need to perform the derived-to-base cast.
    if (BasePath) {
      llvm::Value *Derived = Val.getAggregateAddr();
      llvm::Value *Base =
        GetAddressOfBaseClass(Derived, DerivedClassDecl, *BasePath,
                              /*NullCheckValue=*/false);
      return RValue::get(Base);
    }
  }

  if (Val.isAggregate()) {
    // The aggregate already lives in memory; its address is the result.
    Val = RValue::get(Val.getAggregateAddr());
  } else {
    // Create a temporary variable that we can bind the reference to.
    llvm::Value *Temp = CreateMemTemp(E->getType(), "reftmp");
    if (Val.isScalar())
      EmitStoreOfScalar(Val.getScalarVal(), Temp, false, E->getType());
    else
      StoreComplexToAddr(Val.getComplexVal(), Temp, false);
    Val = RValue::get(Temp);
  }

  return Val;
}


/// getAccessedFieldNo - Given an encoded value and a result number, return the
/// input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  // A zeroinitializer mask selects element 0 for every result position.
  if (isa<llvm::ConstantAggregateZero>(Elts))
    return 0;

  return cast<llvm::ConstantInt>(Elts->getOperand(Idx))->getZExtValue();
}

/// EmitCheck - Emit a runtime object-size check (only when CatchUndefined is
/// enabled): queries llvm.objectsize on \p Address and branches to the trap
/// block if the object is known to be smaller than \p Size bytes. An
/// objectsize result of -1 means "unknown" and the check is skipped.
void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
  if (!CatchUndefined)
    return;

  const llvm::Type *Size_tTy
    = llvm::IntegerType::get(VMContext, LLVMPointerWidth);
  Address = Builder.CreateBitCast(Address, PtrToInt8Ty);

  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, &Size_tTy, 1);
  const llvm::IntegerType *Int1Ty = llvm::IntegerType::get(VMContext, 1);

  // In time, people may want to control this and use a 1 here.
  llvm::Value *Arg = llvm::ConstantInt::get(Int1Ty, 0);
  llvm::Value *C = Builder.CreateCall2(F, Address, Arg);
  llvm::BasicBlock *Cont = createBasicBlock();
  llvm::BasicBlock *Check = createBasicBlock();
  llvm::Value *NegativeOne = llvm::ConstantInt::get(Size_tTy, -1ULL);
  Builder.CreateCondBr(Builder.CreateICmpEQ(C, NegativeOne), Cont, Check);

  EmitBlock(Check);
  Builder.CreateCondBr(Builder.CreateICmpUGE(C,
                                         llvm::ConstantInt::get(Size_tTy, Size)),
                       Cont, getTrapBB());
  EmitBlock(Cont);
}


/// EmitScalarPrePostIncDec - Emit a scalar ++/-- (pre or post) through the
/// lvalue LV, handling pointers (including ObjC interface and function
/// pointers), bool, integers, and floating-point values. Returns the value
/// of the expression: the updated value for pre-forms, the original loaded
/// value for post-forms.
llvm::Value *CodeGenFunction::
EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                        bool isInc, bool isPre) {
  QualType ValTy = E->getSubExpr()->getType();
  llvm::Value *InVal = EmitLoadOfLValue(LV, ValTy).getScalarVal();

  int AmountVal = isInc ? 1 : -1;

  if (ValTy->isPointerType() &&
      ValTy->getAs<PointerType>()->isVariableArrayType()) {
    // The amount of the addition/subtraction needs to account for the VLA size
    ErrorUnsupported(E, "VLA pointer inc/dec");
  }

  llvm::Value *NextVal;
  if (const llvm::PointerType *PT =
        dyn_cast<llvm::PointerType>(InVal->getType())) {
    llvm::Constant *Inc =
      llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), AmountVal);
    if (!isa<llvm::FunctionType>(PT->getElementType())) {
      QualType PTEE = ValTy->getPointeeType();
      if (const ObjCInterfaceType *OIT =
          dyn_cast<ObjCInterfaceType>(PTEE)) {
        // Handle interface types, which are not represented with a concrete
        // type: advance by the interface's size in bytes via i8* arithmetic.
        int size = getContext().getTypeSize(OIT) / 8;
        if (!isInc)
          size = -size;
        Inc = llvm::ConstantInt::get(Inc->getType(), size);
        const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
        InVal = Builder.CreateBitCast(InVal, i8Ty);
        NextVal = Builder.CreateGEP(InVal, Inc, "add.ptr");
        llvm::Value *lhs = LV.getAddress();
        lhs = Builder.CreateBitCast(lhs, llvm::PointerType::getUnqual(i8Ty));
        LV = LValue::MakeAddr(lhs, MakeQualifiers(ValTy));
      } else
        NextVal = Builder.CreateInBoundsGEP(InVal, Inc, "ptrincdec");
    } else {
      // Function pointers: GNU extension treats them as i8* for arithmetic.
      const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
      NextVal = Builder.CreateBitCast(InVal, i8Ty, "tmp");
      NextVal = Builder.CreateGEP(NextVal, Inc, "ptrincdec");
      NextVal = Builder.CreateBitCast(NextVal, InVal->getType());
    }
  } else if (InVal->getType() == llvm::Type::getInt1Ty(VMContext) && isInc) {
    // Bool++ is an interesting case, due to promotion rules, we get:
    // Bool++ -> Bool = Bool+1 -> Bool = (int)Bool+1 ->
    // Bool = ((int)Bool+1) != 0
    // An interesting aspect of this is that increment is always true.
    // Decrement does not have this property.
    NextVal = llvm::ConstantInt::getTrue(VMContext);
  } else if (isa<llvm::IntegerType>(InVal->getType())) {
    NextVal = llvm::ConstantInt::get(InVal->getType(), AmountVal);

    // Signed integer overflow is undefined behavior.
    if (ValTy->isSignedIntegerType())
      NextVal = Builder.CreateNSWAdd(InVal, NextVal, isInc ? "inc" : "dec");
    else
      NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec");
  } else {
    // Add the inc/dec to the real part.
    if (InVal->getType()->isFloatTy())
      NextVal =
        llvm::ConstantFP::get(VMContext,
                              llvm::APFloat(static_cast<float>(AmountVal)));
    else if (InVal->getType()->isDoubleTy())
      NextVal =
        llvm::ConstantFP::get(VMContext,
                              llvm::APFloat(static_cast<double>(AmountVal)));
    else {
      // Remaining float type is long double; convert +/-1 to its semantics.
      llvm::APFloat F(static_cast<float>(AmountVal));
      bool ignored;
      F.convert(Target.getLongDoubleFormat(), llvm::APFloat::rmTowardZero,
                &ignored);
      NextVal = llvm::ConstantFP::get(VMContext, F);
    }
    NextVal = Builder.CreateFAdd(InVal, NextVal, isInc ? "inc" : "dec");
  }

  // Store the updated result through the lvalue.
  if (LV.isBitField())
    EmitStoreThroughBitfieldLValue(RValue::get(NextVal), LV, ValTy, &NextVal);
  else
    EmitStoreThroughLValue(RValue::get(NextVal), LV, ValTy);

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? NextVal : InVal;
}


/// EmitComplexPrePostIncDec - Emit ++/-- (pre or post) of a _Complex value:
/// only the real part is incremented/decremented; the imaginary part is
/// carried through unchanged.
CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                         bool isInc, bool isPre) {
  ComplexPairTy InVal = LoadComplexFromAddr(LV.getAddress(),
                                            LV.isVolatileQualified());

  llvm::Value *NextVal;
  if (isa<llvm::IntegerType>(InVal.first->getType())) {
    // -1 wraps to the all-ones pattern; the constant is created as signed.
    uint64_t AmountVal = isInc ? 1 : -1;
    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  } else {
    QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
    llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
    if (!isInc)
      FVal.changeSign();
    NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  }

  ComplexPairTy IncVal(NextVal, InVal.second);

  // Store the updated result through the lvalue.
  StoreComplexToAddr(IncVal, LV.getAddress(), LV.isVolatileQualified());

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? IncVal : InVal;
}


//===----------------------------------------------------------------------===//
//                         LValue Expression Emission
//===----------------------------------------------------------------------===//

/// GetUndefRValue - Return an RValue of the appropriate kind (scalar,
/// complex, or aggregate) whose value is undef, for use when an expression
/// could not be emitted.
RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType())
    return RValue::get(0);

  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    const llvm::Type *EltTy = ConvertType(CTy->getElementType());
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  }

  if (hasAggregateLLVMType(Ty)) {
    // Aggregates are represented by an address; hand back an undef pointer.
    const llvm::Type *LTy = llvm::PointerType::getUnqual(ConvertType(Ty));
    return RValue::getAggregate(llvm::UndefValue::get(LTy));
  }

  return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
}

/// EmitUnsupportedRValue - Report that \p E cannot be emitted and return an
/// undef rvalue of its type so codegen can continue.
RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}

/// EmitUnsupportedLValue - Report that \p E cannot be emitted as an lvalue
/// and return an lvalue at an undef address so codegen can continue.
LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
  return LValue::MakeAddr(llvm::UndefValue::get(Ty),
                          MakeQualifiers(E->getType()));
}

/// EmitCheckedLValue - Emit \p E as an lvalue and, for simple non-DeclRef
/// non-bitfield lvalues, also emit an object-size check of the full type
/// size at its address (see EmitCheck).
LValue CodeGenFunction::EmitCheckedLValue(const Expr *E) {
  LValue LV = EmitLValue(E);
  if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
    EmitCheck(LV.getAddress(), getContext().getTypeSize(E->getType()) / 8);
  return LV;
}

/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the
/// expression.
///
/// This can return one of two things: a simple address or a bitfield reference.
/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
/// an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of the
/// LLVM value is known: For example, it may not be a pointer to an integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed size,
/// this method guarantees that the returned pointer type will point to an LLVM
/// type of the same size of the lvalue's type. If the lvalue has a variable
/// length type, this is not possible.
///
LValue CodeGenFunction::EmitLValue(const Expr *E) {
  // Dispatch purely on the statement class; anything unlisted is diagnosed
  // as unsupported and yields an undef lvalue.
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::ObjCIsaExprClass:
    return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CompoundAssignOperatorClass:
    return EmitCompoundAssignOperatorLValue(cast<CompoundAssignOperator>(E));
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ParenExprClass:return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));

  case Expr::BlockDeclRefExprClass:
    return EmitBlockDeclRefLValue(cast<BlockDeclRefExpr>(E));

  case Expr::CXXTemporaryObjectExprClass:
  case Expr::CXXConstructExprClass:
    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
  case Expr::CXXBindTemporaryExprClass:
    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
  case Expr::CXXExprWithTemporariesClass:
    return EmitCXXExprWithTemporariesLValue(cast<CXXExprWithTemporaries>(E));
  case Expr::CXXZeroInitValueExprClass:
    return EmitNullInitializationLValue(cast<CXXZeroInitValueExpr>(E));
  case Expr::CXXDefaultArgExprClass:
    return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
  case Expr::CXXTypeidExprClass:
    return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));

  case Expr::ObjCMessageExprClass:
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
  case Expr::ObjCIvarRefExprClass:
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
  case Expr::ObjCPropertyRefExprClass:
    return EmitObjCPropertyRefLValue(cast<ObjCPropertyRefExpr>(E));
  case Expr::ObjCImplicitSetterGetterRefExprClass:
    return EmitObjCKVCRefLValue(cast<ObjCImplicitSetterGetterRefExpr>(E));
  case Expr::ObjCSuperExprClass:
    return EmitObjCSuperExprLValue(cast<ObjCSuperExpr>(E));

  case Expr::StmtExprClass:
    return EmitStmtExprLValue(cast<StmtExpr>(E));
  case Expr::UnaryOperatorClass:
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
  case Expr::ArraySubscriptExprClass:
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
  case Expr::ExtVectorElementExprClass:
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
  case Expr::MemberExprClass:
    return EmitMemberExpr(cast<MemberExpr>(E));
  case Expr::CompoundLiteralExprClass:
    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));
  }
}

/// EmitLoadOfScalar - Load a scalar of type \p Ty from \p Addr, honoring
/// volatility and truncating a widened in-memory bool back to i1.
llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
                                               QualType Ty) {
  llvm::LoadInst *Load = Builder.CreateLoad(Addr, "tmp");
  if (Volatile)
    Load->setVolatile(true);

  // Bool can have different representation in memory than in registers.
  llvm::Value *V = Load;
  if (Ty->isBooleanType())
    if (V->getType() != llvm::Type::getInt1Ty(VMContext))
      V = Builder.CreateTrunc(V, llvm::Type::getInt1Ty(VMContext), "tobool");

  return V;
}

/// EmitStoreOfScalar - Store scalar \p Value of type \p Ty to \p Addr,
/// honoring volatility and widening an i1 bool to its in-memory type.
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
                                        bool Volatile, QualType Ty) {

  if (Ty->isBooleanType()) {
    // Bool can have different representation in memory than in registers.
    const llvm::PointerType *DstPtr = cast<llvm::PointerType>(Addr->getType());
    Value = Builder.CreateIntCast(Value, DstPtr->getElementType(), false);
  }
  Builder.CreateStore(Value, Addr, Volatile);
}

/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, QualType ExprType) {
  if (LV.isObjCWeak()) {
    // load of a __weak object.
    llvm::Value *AddrWeakObj = LV.getAddress();
    return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                             AddrWeakObj));
  }

  if (LV.isSimple()) {
    llvm::Value *Ptr = LV.getAddress();
    const llvm::Type *EltTy =
      cast<llvm::PointerType>(Ptr->getType())->getElementType();

    // Simple scalar l-value.
    //
    // FIXME: We shouldn't have to use isSingleValueType here.
    if (EltTy->isSingleValueType())
      return RValue::get(EmitLoadOfScalar(Ptr, LV.isVolatileQualified(),
                                          ExprType));

    // The only remaining simple case is a function designator, whose value
    // is its address.
    assert(ExprType->isFunctionType() && "Unknown scalar value");
    return RValue::get(Ptr);
  }

  if (LV.isVectorElt()) {
    // Load the whole vector and extract the referenced element.
    llvm::Value *Vec = Builder.CreateLoad(LV.getVectorAddr(),
                                          LV.isVolatileQualified(), "tmp");
    return RValue::get(Builder.CreateExtractElement(Vec, LV.getVectorIdx(),
                                                    "vecext"));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
  if (LV.isExtVectorElt())
    return EmitLoadOfExtVectorElementLValue(LV, ExprType);

  if (LV.isBitField())
    return EmitLoadOfBitfieldLValue(LV, ExprType);

  if (LV.isPropertyRef())
    return EmitLoadOfPropertyRefLValue(LV, ExprType);

  assert(LV.isKVCRef() && "Unknown LValue type!");
  return EmitLoadOfKVCRefLValue(LV, ExprType);
}

/// EmitLoadOfBitfieldLValue - Load the value of a bit-field lvalue by
/// assembling it from one or more memory accesses described by its
/// CGBitFieldInfo, then sign-extending if the field is signed.
RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
                                                 QualType ExprType) {
  const CGBitFieldInfo &Info = LV.getBitFieldInfo();

  // Get the output type.
  const llvm::Type *ResLTy = ConvertType(ExprType);
  unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);

  // Compute the result as an OR of all of the individual component accesses.
  llvm::Value *Res = 0;
  for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
    const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

    // Get the field pointer.
    llvm::Value *Ptr = LV.getBitFieldBaseAddr();

    // Only offset by the field index if used, so that incoming values are not
    // required to be structures.
    if (AI.FieldIndex)
      Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");

    // Offset by the byte offset, if used.
    if (AI.FieldByteOffset) {
      const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
      Ptr = Builder.CreateBitCast(Ptr, i8PTy);
      Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset,"bf.field.offs");
    }

    // Cast to the access type.
    const llvm::Type *PTy = llvm::Type::getIntNPtrTy(VMContext, AI.AccessWidth,
                                                    ExprType.getAddressSpace());
    Ptr = Builder.CreateBitCast(Ptr, PTy);

    // Perform the load.
    llvm::LoadInst *Load = Builder.CreateLoad(Ptr, LV.isVolatileQualified());
    if (AI.AccessAlignment)
      Load->setAlignment(AI.AccessAlignment);

    // Shift out unused low bits and mask out unused high bits.
    llvm::Value *Val = Load;
    if (AI.FieldBitStart)
      Val = Builder.CreateLShr(Load, AI.FieldBitStart);
    Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(AI.AccessWidth,
                                                            AI.TargetBitWidth),
                            "bf.clear");

    // Extend or truncate to the target size.
    if (AI.AccessWidth < ResSizeInBits)
      Val = Builder.CreateZExt(Val, ResLTy);
    else if (AI.AccessWidth > ResSizeInBits)
      Val = Builder.CreateTrunc(Val, ResLTy);

    // Shift into place, and OR into the result.
    if (AI.TargetBitOffset)
      Val = Builder.CreateShl(Val, AI.TargetBitOffset);
    Res = Res ? Builder.CreateOr(Res, Val) : Val;
  }

  // If the bit-field is signed, perform the sign-extension.
  //
  // FIXME: This can easily be folded into the load of the high bits, which
  // could also eliminate the mask of high bits in some situations.
  if (Info.isSigned()) {
    unsigned ExtraBits = ResSizeInBits - Info.getSize();
    if (ExtraBits)
      // Shl/AShr pair sign-extends the field's top bit through the result.
      Res = Builder.CreateAShr(Builder.CreateShl(Res, ExtraBits),
                               ExtraBits, "bf.val.sext");
  }

  return RValue::get(Res);
}

/// EmitLoadOfPropertyRefLValue - Load an Objective-C property by emitting
/// the getter message send.
RValue CodeGenFunction::EmitLoadOfPropertyRefLValue(LValue LV,
                                                    QualType ExprType) {
  return EmitObjCPropertyGet(LV.getPropertyRefExpr());
}

/// EmitLoadOfKVCRefLValue - Load an Objective-C implicit setter/getter
/// reference by emitting the getter.
RValue CodeGenFunction::EmitLoadOfKVCRefLValue(LValue LV,
                                               QualType ExprType) {
  return EmitObjCPropertyGet(LV.getKVCRefExpr());
}

// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
                                                         QualType ExprType) {
  llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddr(),
                                        LV.isVolatileQualified(), "tmp");

  const llvm::Constant *Elts = LV.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be extracting
  // a single element. Just codegen as an extractelement.
  const VectorType *ExprVT = ExprType->getAs<VectorType>();
  if (!ExprVT) {
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(
                                      llvm::Type::getInt32Ty(VMContext), InIdx);
    return RValue::get(Builder.CreateExtractElement(Vec, Elt, "tmp"));
  }

  // Always use shuffle vector to try to retain the original program structure
  unsigned NumResultElts = ExprVT->getNumElements();

  // Build the shuffle mask from the encoded accessed-field numbers.
  llvm::SmallVector<llvm::Constant*, 4> Mask;
  for (unsigned i = 0; i != NumResultElts; ++i) {
    unsigned InIdx = getAccessedFieldNo(i, Elts);
    Mask.push_back(llvm::ConstantInt::get(
                                     llvm::Type::getInt32Ty(VMContext), InIdx));
  }

  llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
  Vec = Builder.CreateShuffleVector(Vec,
                                    llvm::UndefValue::get(Vec->getType()),
                                    MaskV, "tmp");
  return RValue::get(Vec);
}



/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to the have the same type, and that type
/// is 'Ty'.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
                                             QualType Ty) {
  if (!Dst.isSimple()) {
    if (Dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element.
      llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddr(),
                                            Dst.isVolatileQualified(), "tmp");
      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
                                        Dst.getVectorIdx(), "vecins");
      Builder.CreateStore(Vec, Dst.getVectorAddr(),Dst.isVolatileQualified());
      return;
    }

    // If this is an update of extended vector elements, insert them as
    // appropriate.
    if (Dst.isExtVectorElt())
      return EmitStoreThroughExtVectorComponentLValue(Src, Dst, Ty);

    if (Dst.isBitField())
      return EmitStoreThroughBitfieldLValue(Src, Dst, Ty);

    if (Dst.isPropertyRef())
      return EmitStoreThroughPropertyRefLValue(Src, Dst, Ty);

    assert(Dst.isKVCRef() && "Unknown LValue type");
    return EmitStoreThroughKVCRefLValue(Src, Dst, Ty);
  }

  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // load of a __weak object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
    return;
  }

  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // load of a __strong object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    if (Dst.isObjCIvar()) {
      assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
      // The GC runtime's ivar-assign entry point wants the ivar's byte
      // offset from the base object; compute it via pointer subtraction.
      const llvm::Type *ResultType = ConvertType(getContext().LongTy);
      llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
      llvm::Value *dst = RHS;
      RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
      llvm::Value *LHS =
        Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
      llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
      CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
                                              BytesBetween);
    } else if (Dst.isGlobalObjCRef())
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst);
    else
      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
    return;
  }

  assert(Src.isScalar() && "Can't emit an agg store with this method");
  EmitStoreOfScalar(Src.getScalarVal(), Dst.getAddress(),
                    Dst.isVolatileQualified(), Ty);
}

/// EmitStoreThroughBitfieldLValue - Store \p Src into the bit-field lvalue
/// \p Dst via the memory accesses described by its CGBitFieldInfo. If
/// \p Result is non-null, *Result receives the new value of the bit-field
/// as seen by a subsequent read (truncated and, if signed, sign-extended).
void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                                     QualType Ty,
                                                     llvm::Value **Result) {
  const CGBitFieldInfo &Info = Dst.getBitFieldInfo();

  // Get the output type.
  const llvm::Type *ResLTy = ConvertTypeForMem(Ty);
  unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);

  // Get the source value, truncated to the width of the bit-field.
  llvm::Value *SrcVal = Src.getScalarVal();

  if (Ty->isBooleanType())
    SrcVal = Builder.CreateIntCast(SrcVal, ResLTy, /*IsSigned=*/false);

  SrcVal = Builder.CreateAnd(SrcVal, llvm::APInt::getLowBitsSet(ResSizeInBits,
                                                                Info.getSize()),
                             "bf.value");

  // Return the new value of the bit-field, if requested.
  if (Result) {
    // Cast back to the proper type for result.
    const llvm::Type *SrcTy = Src.getScalarVal()->getType();
    llvm::Value *ReloadVal = Builder.CreateIntCast(SrcVal, SrcTy, false,
                                                   "bf.reload.val");

    // Sign extend if necessary.
    if (Info.isSigned()) {
      unsigned ExtraBits = ResSizeInBits - Info.getSize();
      if (ExtraBits)
        ReloadVal = Builder.CreateAShr(Builder.CreateShl(ReloadVal, ExtraBits),
                                       ExtraBits, "bf.reload.sext");
    }

    *Result = ReloadVal;
  }

  // Iterate over the components, writing each piece to memory.
  for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
    const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

    // Get the field pointer.
    llvm::Value *Ptr = Dst.getBitFieldBaseAddr();

    // Only offset by the field index if used, so that incoming values are not
    // required to be structures.
    if (AI.FieldIndex)
      Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");

    // Offset by the byte offset, if used.
    if (AI.FieldByteOffset) {
      const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
      Ptr = Builder.CreateBitCast(Ptr, i8PTy);
      Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset,"bf.field.offs");
    }

    // Cast to the access type.
    const llvm::Type *PTy = llvm::Type::getIntNPtrTy(VMContext, AI.AccessWidth,
                                                     Ty.getAddressSpace());
    Ptr = Builder.CreateBitCast(Ptr, PTy);

    // Extract the piece of the bit-field value to write in this access, limited
    // to the values that are part of this access.
    llvm::Value *Val = SrcVal;
    if (AI.TargetBitOffset)
      Val = Builder.CreateLShr(Val, AI.TargetBitOffset);
    Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(ResSizeInBits,
                                                            AI.TargetBitWidth));

    // Extend or truncate to the access size.
    const llvm::Type *AccessLTy =
      llvm::Type::getIntNTy(VMContext, AI.AccessWidth);
    if (ResSizeInBits < AI.AccessWidth)
      Val = Builder.CreateZExt(Val, AccessLTy);
    else if (ResSizeInBits > AI.AccessWidth)
      Val = Builder.CreateTrunc(Val, AccessLTy);

    // Shift into the position in memory.
    if (AI.FieldBitStart)
      Val = Builder.CreateShl(Val, AI.FieldBitStart);

    // If necessary, load and OR in bits that are outside of the bit-field.
    if (AI.TargetBitWidth != AI.AccessWidth) {
      llvm::LoadInst *Load = Builder.CreateLoad(Ptr, Dst.isVolatileQualified());
      if (AI.AccessAlignment)
        Load->setAlignment(AI.AccessAlignment);

      // Compute the mask for zeroing the bits that are part of the bit-field.
      llvm::APInt InvMask =
        ~llvm::APInt::getBitsSet(AI.AccessWidth, AI.FieldBitStart,
                                 AI.FieldBitStart + AI.TargetBitWidth);

      // Apply the mask and OR in to the value to write.
      Val = Builder.CreateOr(Builder.CreateAnd(Load, InvMask), Val);
    }

    // Write the value.
    llvm::StoreInst *Store = Builder.CreateStore(Val, Ptr,
                                                 Dst.isVolatileQualified());
    if (AI.AccessAlignment)
      Store->setAlignment(AI.AccessAlignment);
  }
}

/// EmitStoreThroughPropertyRefLValue - Store to an Objective-C property by
/// emitting the setter message send.
void CodeGenFunction::EmitStoreThroughPropertyRefLValue(RValue Src,
                                                        LValue Dst,
                                                        QualType Ty) {
  EmitObjCPropertySet(Dst.getPropertyRefExpr(), Src);
}

/// EmitStoreThroughKVCRefLValue - Store to an Objective-C implicit
/// setter/getter reference by emitting the setter.
void CodeGenFunction::EmitStoreThroughKVCRefLValue(RValue Src,
                                                   LValue Dst,
                                                   QualType Ty) {
  EmitObjCPropertySet(Dst.getKVCRefExpr(), Src);
}

/// EmitStoreThroughExtVectorComponentLValue - Store \p Src into the ext-vector
/// element subset described by \p Dst via a read/modify/write of the whole
/// vector. (NOTE: this definition continues beyond the visible portion of the
/// file.)
void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
                                                               LValue Dst,
                                                               QualType Ty) {
  // This access turns into a read/modify/write of the vector. Load the input
  // value now.
  llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddr(),
                                        Dst.isVolatileQualified(), "tmp");
  const llvm::Constant *Elts = Dst.getExtVectorElts();

  llvm::Value *SrcVal = Src.getScalarVal();

  if (const VectorType *VTy = Ty->getAs<VectorType>()) {
    unsigned NumSrcElts = VTy->getNumElements();
    unsigned NumDstElts =
       cast<llvm::VectorType>(Vec->getType())->getNumElements();
    if (NumDstElts == NumSrcElts) {
      // Use shuffle vector is the src and destination are the same number of
      // elements and restore the vector mask since it is on the side it will be
      // stored.
      llvm::SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
      for (unsigned i = 0; i != NumSrcElts; ++i) {
        unsigned InIdx = getAccessedFieldNo(i, Elts);
        // Invert the accessor mask: result position InIdx takes source lane i.
        Mask[InIdx] = llvm::ConstantInt::get(
                                          llvm::Type::getInt32Ty(VMContext), i);
      }

      llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
      Vec = Builder.CreateShuffleVector(SrcVal,
                                        llvm::UndefValue::get(Vec->getType()),
                                        MaskV, "tmp");
    } else if (NumDstElts > NumSrcElts) {
      // Extended the source vector to the same length and then shuffle it
      // into the destination.
965 // FIXME: since we're shuffling with undef, can we just use the indices 966 // into that? This could be simpler. 967 llvm::SmallVector<llvm::Constant*, 4> ExtMask; 968 const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext); 969 unsigned i; 970 for (i = 0; i != NumSrcElts; ++i) 971 ExtMask.push_back(llvm::ConstantInt::get(Int32Ty, i)); 972 for (; i != NumDstElts; ++i) 973 ExtMask.push_back(llvm::UndefValue::get(Int32Ty)); 974 llvm::Value *ExtMaskV = llvm::ConstantVector::get(&ExtMask[0], 975 ExtMask.size()); 976 llvm::Value *ExtSrcVal = 977 Builder.CreateShuffleVector(SrcVal, 978 llvm::UndefValue::get(SrcVal->getType()), 979 ExtMaskV, "tmp"); 980 // build identity 981 llvm::SmallVector<llvm::Constant*, 4> Mask; 982 for (unsigned i = 0; i != NumDstElts; ++i) 983 Mask.push_back(llvm::ConstantInt::get(Int32Ty, i)); 984 985 // modify when what gets shuffled in 986 for (unsigned i = 0; i != NumSrcElts; ++i) { 987 unsigned Idx = getAccessedFieldNo(i, Elts); 988 Mask[Idx] = llvm::ConstantInt::get(Int32Ty, i+NumDstElts); 989 } 990 llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size()); 991 Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV, "tmp"); 992 } else { 993 // We should never shorten the vector 994 assert(0 && "unexpected shorten vector length"); 995 } 996 } else { 997 // If the Src is a scalar (not a vector) it must be updating one element. 998 unsigned InIdx = getAccessedFieldNo(0, Elts); 999 const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext); 1000 llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx); 1001 Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt, "tmp"); 1002 } 1003 1004 Builder.CreateStore(Vec, Dst.getExtVectorAddr(), Dst.isVolatileQualified()); 1005} 1006 1007// setObjCGCLValueClass - sets class of he lvalue for the purpose of 1008// generating write-barries API. It is currently a global, ivar, 1009// or neither. 
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
                                 LValue &LV) {
  // Nothing to classify unless GC write barriers are in play.
  if (Ctx.getLangOptions().getGCMode() == LangOptions::NonGC)
    return;

  if (isa<ObjCIvarRefExpr>(E)) {
    LV.SetObjCIvar(LV, true);
    ObjCIvarRefExpr *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr*>(E));
    LV.setBaseIvarExp(Exp->getBase());
    LV.SetObjCArray(LV, E->getType()->isArrayType());
    return;
  }

  if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
      // File-scope variables and non-local-storage block variables take the
      // global write-barrier path.
      if ((VD->isBlockVarDecl() && !VD->hasLocalStorage()) ||
          VD->isFileVarDecl())
        LV.SetGlobalObjCRef(LV, true);
    }
    LV.SetObjCArray(LV, E->getType()->isArrayType());
    return;
  }

  // For the wrapper expressions below, classification is determined by the
  // wrapped subexpression (or base), so recurse.
  if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
    return;
  }

  if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
    if (LV.isObjCIvar()) {
      // If cast is to a structure pointer, follow gcc's behavior and make it
      // a non-ivar write-barrier.
      QualType ExpTy = E->getType();
      if (ExpTy->isPointerType())
        ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType())
        LV.SetObjCIvar(LV, false);
    }
    return;
  }
  if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
    return;
  }

  if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
    return;
  }

  if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
    if (LV.isObjCIvar() && !LV.isObjCArray())
      // Using array syntax to assign to what an ivar points to is not the
      // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
      LV.SetObjCIvar(LV, false);
    else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
      // Using array syntax to assign to what a global points to is not the
      // same as assigning to the global itself. {id *G;} G[i] = 0;
      LV.SetGlobalObjCRef(LV, false);
    return;
  }

  if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
    // We don't know if member is an 'ivar', but this flag is looked at
    // only in the context of LV.isObjCIvar().
    LV.SetObjCArray(LV, E->getType()->isArrayType());
    return;
  }
}

/// EmitGlobalVarDeclLValue - Form an lvalue for a global variable, loading
/// through the global first if the variable has reference type, and classify
/// it for GC write barriers.
static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
                                      const Expr *E, const VarDecl *VD) {
  assert((VD->hasExternalStorage() || VD->isFileVarDecl()) &&
         "Var decl must have external storage or be a file var decl!");

  llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
  if (VD->getType()->isReferenceType())
    V = CGF.Builder.CreateLoad(V, "tmp");
  LValue LV = LValue::MakeAddr(V, CGF.MakeQualifiers(E->getType()));
  setObjCGCLValueClass(CGF.getContext(), E, LV);
  return LV;
}

/// EmitFunctionDeclLValue - Form an lvalue referring to a function.
static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
                                     const Expr *E, const FunctionDecl *FD) {
  llvm::Value* V = CGF.CGM.GetAddrOfFunction(FD);
  if (!FD->hasPrototype()) {
    if (const FunctionProtoType *Proto =
            FD->getType()->getAs<FunctionProtoType>()) {
      // Ugly case: for a K&R-style definition, the type of the definition
      // isn't the same as the type of a use.  Correct for this with a
      // bitcast.
      QualType NoProtoType =
          CGF.getContext().getFunctionNoProtoType(Proto->getResultType());
      NoProtoType = CGF.getContext().getPointerType(NoProtoType);
      V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType), "tmp");
    }
  }
  return LValue::MakeAddr(V, CGF.MakeQualifiers(E->getType()));
}

LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
  const NamedDecl *ND = E->getDecl();

  // A weakref is emitted as a reference to its aliasee.
  if (ND->hasAttr<WeakRefAttr>()) {
    const ValueDecl* VD = cast<ValueDecl>(ND);
    llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD);

    Qualifiers Quals = MakeQualifiers(E->getType());
    LValue LV = LValue::MakeAddr(Aliasee, Quals);

    return LV;
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {

    // Check if this is a global variable.
    if (VD->hasExternalStorage() || VD->isFileVarDecl())
      return EmitGlobalVarDeclLValue(*this, E, VD);

    bool NonGCable = VD->hasLocalStorage() && !VD->hasAttr<BlocksAttr>();

    llvm::Value *V = LocalDeclMap[VD];
    // In C++, a static local may be referenced before its emission has
    // registered it in LocalDeclMap (e.g. from a nested function).
    if (!V && getContext().getLangOptions().CPlusPlus &&
        VD->isStaticLocal())
      V = CGM.getStaticLocalDeclAddress(VD);
    assert(V && "DeclRefExpr not entered in LocalDeclMap?");

    Qualifiers Quals = MakeQualifiers(E->getType());
    // local variables do not get their gc attribute set.
    // local static?
    if (NonGCable) Quals.removeObjCGCAttr();

    if (VD->hasAttr<BlocksAttr>()) {
      // __block variables live in a byref structure; chase the forwarding
      // pointer to reach the current copy of the variable.
      V = Builder.CreateStructGEP(V, 1, "forwarding");
      V = Builder.CreateLoad(V);
      V = Builder.CreateStructGEP(V, getByRefValueLLVMField(VD),
                                  VD->getNameAsString());
    }
    if (VD->getType()->isReferenceType())
      V = Builder.CreateLoad(V, "tmp");
    LValue LV = LValue::MakeAddr(V, Quals);
    LValue::SetObjCNonGC(LV, NonGCable);
    setObjCGCLValueClass(getContext(), E, LV);
    return LV;
  }

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
    return EmitFunctionDeclLValue(*this, E, FD);

  // FIXME: the qualifier check does not seem sufficient here
  if (E->getQualifier()) {
    const FieldDecl *FD = cast<FieldDecl>(ND);
    llvm::Value *V = CGM.EmitPointerToDataMember(FD);

    return LValue::MakeAddr(V, MakeQualifiers(FD->getType()));
  }

  assert(false && "Unhandled DeclRefExpr");

  // an invalid LValue, but the assert will
  // ensure that this point is never reached.
  return LValue();
}

LValue CodeGenFunction::EmitBlockDeclRefLValue(const BlockDeclRefExpr *E) {
  return LValue::MakeAddr(GetAddrOfBlockDecl(E), MakeQualifiers(E->getType()));
}

LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
  // __extension__ doesn't affect lvalue-ness.
  if (E->getOpcode() == UnaryOperator::Extension)
    return EmitLValue(E->getSubExpr());

  QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
  switch (E->getOpcode()) {
  default: assert(0 && "Unknown unary operator lvalue!");
  case UnaryOperator::Deref: {
    QualType T = E->getSubExpr()->getType()->getPointeeType();
    assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");

    // The dereferenced lvalue inherits the pointer's address space.
    Qualifiers Quals = MakeQualifiers(T);
    Quals.setAddressSpace(ExprTy.getAddressSpace());

    LValue LV = LValue::MakeAddr(EmitScalarExpr(E->getSubExpr()), Quals);
    // We should not generate __weak write barrier on indirect reference
    // of a pointer to object; as in void foo (__weak id *param); *param = 0;
    // But, we continue to generate __strong write barrier on indirect write
    // into a pointer to object.
    if (getContext().getLangOptions().ObjC1 &&
        getContext().getLangOptions().getGCMode() != LangOptions::NonGC &&
        LV.isObjCWeak())
      LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate(getContext()));
    return LV;
  }
  case UnaryOperator::Real:
  case UnaryOperator::Imag: {
    LValue LV = EmitLValue(E->getSubExpr());
    // A _Complex lvalue is a two-element struct; index 0 is the real part,
    // index 1 the imaginary part.
    unsigned Idx = E->getOpcode() == UnaryOperator::Imag;
    return LValue::MakeAddr(Builder.CreateStructGEP(LV.getAddress(),
                                                    Idx, "idx"),
                            MakeQualifiers(ExprTy));
  }
  case UnaryOperator::PreInc:
  case UnaryOperator::PreDec: {
    // Pre-increment/decrement yield the updated lvalue itself.
    LValue LV = EmitLValue(E->getSubExpr());
    bool isInc = E->getOpcode() == UnaryOperator::PreInc;

    if (E->getType()->isAnyComplexType())
      EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
    else
      EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
    return LV;
  }
  }
}

LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
  return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromLiteral(E),
                          Qualifiers());
}

LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
  return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromObjCEncode(E),
                          Qualifiers());
}


/// EmitPredefinedFunctionName - Form an lvalue for one of the __func__ family
/// of predefined identifiers, backed by a uniqued constant C string whose
/// global name incorporates the current function's name.
LValue CodeGenFunction::EmitPredefinedFunctionName(unsigned Type) {
  std::string GlobalVarName;

  switch (Type) {
  default: assert(0 && "Invalid type");
  case PredefinedExpr::Func:
    GlobalVarName = "__func__.";
    break;
  case PredefinedExpr::Function:
    GlobalVarName = "__FUNCTION__.";
    break;
  case PredefinedExpr::PrettyFunction:
    GlobalVarName = "__PRETTY_FUNCTION__.";
    break;
  }

  llvm::StringRef FnName = CurFn->getName();
  // Strip the \01 prefix used to suppress asm-name mangling.
  if (FnName.startswith("\01"))
    FnName = FnName.substr(1);
  GlobalVarName += FnName;

  std::string FunctionName =
    PredefinedExpr::ComputeName((PredefinedExpr::IdentType)Type, CurCodeDecl);

  llvm::Constant *C =
    CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str());
  return LValue::MakeAddr(C, Qualifiers());
}

LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
  switch (E->getIdentType()) {
  default:
    return EmitUnsupportedLValue(E, "predefined expression");
  case PredefinedExpr::Func:
  case PredefinedExpr::Function:
  case PredefinedExpr::PrettyFunction:
    return EmitPredefinedFunctionName(E->getIdentType());
  }
}

/// getTrapBB - Return a basic block that calls llvm.trap and is unreachable
/// afterwards, creating it (and branching around it from the current insert
/// point) on first use.
llvm::BasicBlock *CodeGenFunction::getTrapBB() {
  const CodeGenOptions &GCO = CGM.getCodeGenOpts();

  // If we are not optimizing, don't collapse all calls to trap in the function
  // to the same call, that way, in the debugger they can see which operation
  // did in fact fail.  If we are optimizing, we collapse all calls to trap
  // down to just one per function to save on code size.
  if (GCO.OptimizationLevel
      && TrapBB)
    return TrapBB;

  llvm::BasicBlock *Cont = 0;
  if (HaveInsertPoint()) {
    Cont = createBasicBlock("cont");
    EmitBranch(Cont);
  }
  TrapBB = createBasicBlock("trap");
  EmitBlock(TrapBB);

  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap, 0, 0);
  llvm::CallInst *TrapCall = Builder.CreateCall(F);
  TrapCall->setDoesNotReturn();
  TrapCall->setDoesNotThrow();
  Builder.CreateUnreachable();

  if (Cont)
    EmitBlock(Cont);
  return TrapBB;
}

LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
  // The index must always be an integer, which is not an aggregate.  Emit it.
  llvm::Value *Idx = EmitScalarExpr(E->getIdx());
  QualType IdxTy = E->getIdx()->getType();
  bool IdxSigned = IdxTy->isSignedIntegerType();

  // If the base is a vector type, then we are forming a vector element lvalue
  // with this subscript.
  if (E->getBase()->getType()->isVectorType()) {
    // Emit the vector as an lvalue to get its address.
    LValue LHS = EmitLValue(E->getBase());
    assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
    Idx = Builder.CreateIntCast(Idx,
                          llvm::Type::getInt32Ty(VMContext), IdxSigned, "vidx");
    return LValue::MakeVectorElt(LHS.getAddress(), Idx,
      E->getBase()->getType().getCVRQualifiers());
  }

  // The base must be a pointer, which is not an aggregate.  Emit it.
  llvm::Value *Base = EmitScalarExpr(E->getBase());

  // Extend or truncate the index type to 32 or 64-bits.
  unsigned IdxBitwidth = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
  if (IdxBitwidth != LLVMPointerWidth)
    Idx = Builder.CreateIntCast(Idx,
                            llvm::IntegerType::get(VMContext, LLVMPointerWidth),
                                IdxSigned, "idxprom");

  // FIXME: As llvm implements the object size checking, this can come out.
  if (CatchUndefined) {
    if (const ImplicitCastExpr *ICE=dyn_cast<ImplicitCastExpr>(E->getBase())) {
      if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr())) {
        if (ICE->getCastKind() == CastExpr::CK_ArrayToPointerDecay) {
          if (const ConstantArrayType *CAT
              = getContext().getAsConstantArrayType(DRE->getType())) {
            llvm::APInt Size = CAT->getSize();
            llvm::BasicBlock *Cont = createBasicBlock("cont");
            // NOTE(review): ULE permits Idx == Size (the one-past-the-end
            // address) — confirm that is the intended bound here rather
            // than ULT.
            Builder.CreateCondBr(Builder.CreateICmpULE(Idx,
                                  llvm::ConstantInt::get(Idx->getType(), Size)),
                                 Cont, getTrapBB());
            EmitBlock(Cont);
          }
        }
      }
    }
  }

  // We know that the pointer points to a type of the correct size, unless the
  // size is a VLA or Objective-C interface.
  llvm::Value *Address = 0;
  if (const VariableArrayType *VAT =
        getContext().getAsVariableArrayType(E->getType())) {
    // For a VLA element, scale the index by the runtime element count of the
    // VLA, normalized by the size of its base element type.
    llvm::Value *VLASize = GetVLASize(VAT);

    Idx = Builder.CreateMul(Idx, VLASize);

    QualType BaseType = getContext().getBaseElementType(VAT);

    CharUnits BaseTypeSize = getContext().getTypeSizeInChars(BaseType);
    Idx = Builder.CreateUDiv(Idx,
                             llvm::ConstantInt::get(Idx->getType(),
                                 BaseTypeSize.getQuantity()));
    Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
  } else if (const ObjCInterfaceType *OIT =
             dyn_cast<ObjCInterfaceType>(E->getType())) {
    // Objective-C interface types have no LLVM representation of their size,
    // so index in bytes via i8*.
    llvm::Value *InterfaceSize =
      llvm::ConstantInt::get(Idx->getType(),
          getContext().getTypeSizeInChars(OIT).getQuantity());

    Idx = Builder.CreateMul(Idx, InterfaceSize);

    const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
    Address = Builder.CreateGEP(Builder.CreateBitCast(Base, i8PTy),
                                Idx, "arrayidx");
    Address = Builder.CreateBitCast(Address, Base->getType());
  } else {
    Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
  }

  QualType T = E->getBase()->getType()->getPointeeType();
  assert(!T.isNull() &&
         "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");

  Qualifiers Quals = MakeQualifiers(T);
  Quals.setAddressSpace(E->getBase()->getType().getAddressSpace());

  LValue LV = LValue::MakeAddr(Address, Quals);
  if (getContext().getLangOptions().ObjC1 &&
      getContext().getLangOptions().getGCMode() != LangOptions::NonGC) {
    LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate(getContext()));
    setObjCGCLValueClass(getContext(), E, LV);
  }
  return LV;
}

/// GenerateConstantVector - Build a constant i32 vector from a list of
/// element indices (used as a shuffle/component mask).
static
llvm::Constant *GenerateConstantVector(llvm::LLVMContext &VMContext,
                                       llvm::SmallVector<unsigned, 4> &Elts) {
  llvm::SmallVector<llvm::Constant*, 4> CElts;

  for (unsigned i = 0, e = Elts.size(); i != e; ++i)
    CElts.push_back(llvm::ConstantInt::get(
                                   llvm::Type::getInt32Ty(VMContext), Elts[i]));

  return llvm::ConstantVector::get(&CElts[0], CElts.size());
}

LValue CodeGenFunction::
EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
  const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);

  // Emit the base vector as an l-value.
  LValue Base;

  // ExtVectorElementExpr's base can either be a vector or pointer to vector.
  if (E->isArrow()) {
    // If it is a pointer to a vector, emit the address and form an lvalue with
    // it.
    llvm::Value *Ptr = EmitScalarExpr(E->getBase());
    const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
    Qualifiers Quals = MakeQualifiers(PT->getPointeeType());
    Quals.removeObjCGCAttr();
    Base = LValue::MakeAddr(Ptr, Quals);
  } else if (E->getBase()->isLvalue(getContext()) == Expr::LV_Valid) {
    // Otherwise, if the base is an lvalue ( as in the case of foo.x.x),
    // emit the base as an lvalue.
    assert(E->getBase()->getType()->isVectorType());
    Base = EmitLValue(E->getBase());
  } else {
    // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
    assert(E->getBase()->getType()->getAs<VectorType>() &&
           "Result must be a vector");
    llvm::Value *Vec = EmitScalarExpr(E->getBase());

    // Store the vector to memory (because LValue wants an address).
    llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType());
    Builder.CreateStore(Vec, VecMem);
    Base = LValue::MakeAddr(VecMem, Qualifiers());
  }

  // Encode the element access list into a vector of unsigned indices.
  llvm::SmallVector<unsigned, 4> Indices;
  E->getEncodedElementAccess(Indices);

  if (Base.isSimple()) {
    llvm::Constant *CV = GenerateConstantVector(VMContext, Indices);
    return LValue::MakeExtVectorElt(Base.getAddress(), CV,
                                    Base.getVRQualifiers());
  }
  assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");

  // The base is itself a swizzle (e.g. V.xy.x): compose the two element
  // access lists.
  llvm::Constant *BaseElts = Base.getExtVectorElts();
  llvm::SmallVector<llvm::Constant *, 4> CElts;

  for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
    if (isa<llvm::ConstantAggregateZero>(BaseElts))
      CElts.push_back(llvm::ConstantInt::get(Int32Ty, 0));
    else
      CElts.push_back(cast<llvm::Constant>(BaseElts->getOperand(Indices[i])));
  }
  llvm::Constant *CV = llvm::ConstantVector::get(&CElts[0], CElts.size());
  return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV,
                                  Base.getVRQualifiers());
}

LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
  bool isNonGC = false;
  Expr *BaseExpr = E->getBase();
  llvm::Value *BaseValue = NULL;
  Qualifiers BaseQuals;

  // If this is s.x, emit s as an lvalue.  If it is s->x, emit s as a scalar.
  if (E->isArrow()) {
    BaseValue = EmitScalarExpr(BaseExpr);
    const PointerType *PTy =
      BaseExpr->getType()->getAs<PointerType>();
    BaseQuals = PTy->getPointeeType().getQualifiers();
  } else if (isa<ObjCPropertyRefExpr>(BaseExpr->IgnoreParens()) ||
             isa<ObjCImplicitSetterGetterRefExpr>(
               BaseExpr->IgnoreParens())) {
    // A property-reference base must be materialized through its getter.
    RValue RV = EmitObjCPropertyGet(BaseExpr);
    BaseValue = RV.getAggregateAddr();
    BaseQuals = BaseExpr->getType().getQualifiers();
  } else {
    LValue BaseLV = EmitLValue(BaseExpr);
    if (BaseLV.isNonGC())
      isNonGC = true;
    // FIXME: this isn't right for bitfields.
    BaseValue = BaseLV.getAddress();
    QualType BaseTy = BaseExpr->getType();
    BaseQuals = BaseTy.getQualifiers();
  }

  NamedDecl *ND = E->getMemberDecl();
  if (FieldDecl *Field = dyn_cast<FieldDecl>(ND)) {
    LValue LV = EmitLValueForField(BaseValue, Field,
                                   BaseQuals.getCVRQualifiers());
    LValue::SetObjCNonGC(LV, isNonGC);
    setObjCGCLValueClass(getContext(), E, LV);
    return LV;
  }

  if (VarDecl *VD = dyn_cast<VarDecl>(ND))
    return EmitGlobalVarDeclLValue(*this, E, VD);

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
    return EmitFunctionDeclLValue(*this, E, FD);

  assert(false && "Unhandled member declaration!");
  return LValue();
}

/// EmitLValueForBitfield - Form a bit-field lvalue from the record layout's
/// bit-field info, merging the field's CVR qualifiers with the caller's.
LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value* BaseValue,
                                              const FieldDecl* Field,
                                              unsigned CVRQualifiers) {
  const CGRecordLayout &RL =
    CGM.getTypes().getCGRecordLayout(Field->getParent());
  const CGBitFieldInfo &Info = RL.getBitFieldInfo(Field);
  return LValue::MakeBitfield(BaseValue, Info,
                             Field->getType().getCVRQualifiers()|CVRQualifiers);
}

LValue CodeGenFunction::EmitLValueForField(llvm::Value* BaseValue,
                                           const FieldDecl* Field,
                                           unsigned CVRQualifiers) {
  if (Field->isBitField())
    return EmitLValueForBitfield(BaseValue, Field, CVRQualifiers);

  const CGRecordLayout &RL =
    CGM.getTypes().getCGRecordLayout(Field->getParent());
  unsigned idx = RL.getLLVMFieldNo(Field);
  llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx, "tmp");

  // Match union field type.
  if (Field->getParent()->isUnion()) {
    const llvm::Type *FieldTy =
      CGM.getTypes().ConvertTypeForMem(Field->getType());
    const llvm::PointerType * BaseTy =
      cast<llvm::PointerType>(BaseValue->getType());
    unsigned AS = BaseTy->getAddressSpace();
    V = Builder.CreateBitCast(V,
                              llvm::PointerType::get(FieldTy, AS),
                              "tmp");
  }
  // A reference member yields the referred-to lvalue, so load through it.
  if (Field->getType()->isReferenceType())
    V = Builder.CreateLoad(V, "tmp");

  Qualifiers Quals = MakeQualifiers(Field->getType());
  Quals.addCVRQualifiers(CVRQualifiers);
  // __weak attribute on a field is ignored.
  if (Quals.getObjCGCAttr() == Qualifiers::Weak)
    Quals.removeObjCGCAttr();

  return LValue::MakeAddr(V, Quals);
}

/// EmitLValueForFieldInitialization - Like EmitLValueForField, but for use
/// when initializing the field: a reference-typed field yields the address
/// of the reference slot itself rather than loading through it.
LValue
CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value* BaseValue,
                                                  const FieldDecl* Field,
                                                  unsigned CVRQualifiers) {
  QualType FieldType = Field->getType();

  if (!FieldType->isReferenceType())
    return EmitLValueForField(BaseValue, Field, CVRQualifiers);

  const CGRecordLayout &RL =
    CGM.getTypes().getCGRecordLayout(Field->getParent());
  unsigned idx = RL.getLLVMFieldNo(Field);
  llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx, "tmp");

  assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs");

  return LValue::MakeAddr(V, MakeQualifiers(FieldType));
}

LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr* E){
  // Materialize the compound literal into a temporary and hand back its
  // address.
  llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
  const Expr* InitExpr = E->getInitializer();
  LValue Result = LValue::MakeAddr(DeclPtr, MakeQualifiers(E->getType()));

  EmitAnyExprToMem(InitExpr, DeclPtr, /*Volatile*/ false);

  return Result;
}

LValue
CodeGenFunction::EmitConditionalOperatorLValue(const ConditionalOperator* E) {
  if (E->isLvalue(getContext()) == Expr::LV_Valid) {
    // If the condition folds to a constant, emit only the live arm.
    if (int Cond = ConstantFoldsToSimpleInteger(E->getCond())) {
      Expr *Live = Cond == 1 ? E->getLHS() : E->getRHS();
      if (Live)
        return EmitLValue(Live);
    }

    if (!E->getLHS())
      return EmitUnsupportedLValue(E, "conditional operator with missing LHS");

    llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
    llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
    llvm::BasicBlock *ContBlock = createBasicBlock("cond.end");

    EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);

    // Any temporaries created here are conditional.
    BeginConditionalBranch();
    EmitBlock(LHSBlock);
    LValue LHS = EmitLValue(E->getLHS());
    EndConditionalBranch();

    if (!LHS.isSimple())
      return EmitUnsupportedLValue(E, "conditional operator");

    // FIXME: We shouldn't need an alloca for this.
    llvm::Value *Temp = CreateTempAlloca(LHS.getAddress()->getType(),"condtmp");
    Builder.CreateStore(LHS.getAddress(), Temp);
    EmitBranch(ContBlock);

    // Any temporaries created here are conditional.
    BeginConditionalBranch();
    EmitBlock(RHSBlock);
    LValue RHS = EmitLValue(E->getRHS());
    EndConditionalBranch();
    if (!RHS.isSimple())
      return EmitUnsupportedLValue(E, "conditional operator");

    Builder.CreateStore(RHS.getAddress(), Temp);
    EmitBranch(ContBlock);

    EmitBlock(ContBlock);

    // Both arms stored their chosen address into Temp; reload it to form
    // the result lvalue.
    Temp = Builder.CreateLoad(Temp, "lv");
    return LValue::MakeAddr(Temp, MakeQualifiers(E->getType()));
  }

  // ?: here should be an aggregate.
  assert((hasAggregateLLVMType(E->getType()) &&
          !E->getType()->isAnyComplexType()) &&
         "Unexpected conditional operator!");

  return EmitAggExprToLValue(E);
}

/// EmitCastLValue - Casts are never lvalues unless that cast is a dynamic_cast.
/// If the cast is a dynamic_cast, we can have the usual lvalue result,
/// otherwise if a cast is needed by the code generator in an lvalue context,
/// then it must mean that we need the address of an aggregate in order to
/// access one of its fields.  This can happen for all the reasons that casts
/// are permitted with aggregate result, including noop aggregate casts, and
/// cast from scalar to union.
LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
  switch (E->getCastKind()) {
  default:
    return EmitUnsupportedLValue(E, "unexpected cast lvalue");

  case CastExpr::CK_Dynamic: {
    LValue LV = EmitLValue(E->getSubExpr());
    llvm::Value *V = LV.getAddress();
    const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(E);
    return LValue::MakeAddr(EmitDynamicCast(V, DCE),
                            MakeQualifiers(E->getType()));
  }

  case CastExpr::CK_NoOp:
  case CastExpr::CK_ConstructorConversion:
  case CastExpr::CK_UserDefinedConversion:
  case CastExpr::CK_AnyPointerToObjCPointerCast:
    return EmitLValue(E->getSubExpr());

  case CastExpr::CK_UncheckedDerivedToBase:
  case CastExpr::CK_DerivedToBase: {
    const RecordType *DerivedClassTy =
      E->getSubExpr()->getType()->getAs<RecordType>();
    CXXRecordDecl *DerivedClassDecl =
      cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());

    // Perform the derived-to-base conversion
    llvm::Value *Base =
      GetAddressOfBaseClass(LV.getAddress(), DerivedClassDecl,
                            E->getBasePath(), /*NullCheckValue=*/false);

    return LValue::MakeAddr(Base, MakeQualifiers(E->getType()));
  }
  case CastExpr::CK_ToUnion:
    return EmitAggExprToLValue(E);
  case CastExpr::CK_BaseToDerived: {
    const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>();
    CXXRecordDecl *DerivedClassDecl =
      cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());

    // Perform the base-to-derived conversion
    llvm::Value *Derived =
      GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl,
                               E->getBasePath(),/*NullCheckValue=*/false);

    return LValue::MakeAddr(Derived, MakeQualifiers(E->getType()));
  }
  case CastExpr::CK_BitCast: {
    // This must be a reinterpret_cast (or c-style equivalent).
    const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E);

    LValue LV = EmitLValue(E->getSubExpr());
    llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
                                           ConvertType(CE->getTypeAsWritten()));
    return LValue::MakeAddr(V, MakeQualifiers(E->getType()));
  }
  }
}

LValue CodeGenFunction::EmitNullInitializationLValue(
                                              const CXXZeroInitValueExpr *E) {
  // Materialize the zero-initialized value into a memset-to-zero temporary.
  QualType Ty = E->getType();
  LValue LV = LValue::MakeAddr(CreateMemTemp(Ty), MakeQualifiers(Ty));
  EmitMemSetToZero(LV.getAddress(), Ty);
  return LV;
}

//===--------------------------------------------------------------------===//
//                             Expression Emission
//===--------------------------------------------------------------------===//


RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
                                     ReturnValueSlot ReturnValue) {
  // Builtins never have block type.
  if (E->getCallee()->getType()->isBlockPointerType())
    return EmitBlockCallExpr(E, ReturnValue);

  if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E))
    return EmitCXXMemberCallExpr(CE, ReturnValue);

  // A direct callee lets us dispatch builtins and record the target for the
  // call emission below.
  const Decl *TargetDecl = 0;
  if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E->getCallee())) {
    if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) {
      TargetDecl = DRE->getDecl();
      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(TargetDecl))
        if (unsigned builtinID = FD->getBuiltinID())
          return EmitBuiltinExpr(FD, builtinID, E);
    }
  }

  if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E))
    if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
      return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);

  if (isa<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    EmitScalarExpr(E->getCallee());
    return RValue::get(0);
  }

  llvm::Value *Callee = EmitScalarExpr(E->getCallee());
  return EmitCall(E->getCallee()->getType(), Callee, ReturnValue,
                  E->arg_begin(), E->arg_end(), TargetDecl);
}

LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
  // Comma expressions just emit their LHS then their RHS as an l-value.
  if (E->getOpcode() == BinaryOperator::Comma) {
    EmitAnyExpr(E->getLHS());
    EnsureInsertPoint();
    return EmitLValue(E->getRHS());
  }

  if (E->getOpcode() == BinaryOperator::PtrMemD ||
      E->getOpcode() == BinaryOperator::PtrMemI)
    return EmitPointerToDataMemberBinaryExpr(E);

  // Can only get l-value for binary operator expressions which are a
  // simple assignment of aggregate type.
  if (E->getOpcode() != BinaryOperator::Assign)
    return EmitUnsupportedLValue(E, "binary l-value expression");

  if (!hasAggregateLLVMType(E->getType())) {
    // Emit the LHS as an l-value.
    LValue LV = EmitLValue(E->getLHS());

    llvm::Value *RHS = EmitScalarExpr(E->getRHS());
    EmitStoreOfScalar(RHS, LV.getAddress(), LV.isVolatileQualified(),
                      E->getType());
    return LV;
  }

  return EmitAggExprToLValue(E);
}

LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
  RValue RV = EmitCallExpr(E);

  if (!RV.isScalar())
    return LValue::MakeAddr(RV.getAggregateAddr(),MakeQualifiers(E->getType()));

  assert(E->getCallReturnType()->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return LValue::MakeAddr(RV.getScalarVal(), MakeQualifiers(E->getType()));
}

LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
  // FIXME: This shouldn't require another copy.
1819 return EmitAggExprToLValue(E); 1820} 1821 1822LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) { 1823 llvm::Value *Temp = CreateMemTemp(E->getType(), "tmp"); 1824 EmitCXXConstructExpr(Temp, E); 1825 return LValue::MakeAddr(Temp, MakeQualifiers(E->getType())); 1826} 1827 1828LValue 1829CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) { 1830 llvm::Value *Temp = EmitCXXTypeidExpr(E); 1831 return LValue::MakeAddr(Temp, MakeQualifiers(E->getType())); 1832} 1833 1834LValue 1835CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) { 1836 LValue LV = EmitLValue(E->getSubExpr()); 1837 PushCXXTemporary(E->getTemporary(), LV.getAddress()); 1838 return LV; 1839} 1840 1841LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) { 1842 // Can only get l-value for message expression returning aggregate type 1843 RValue RV = EmitObjCMessageExpr(E); 1844 // FIXME: can this be volatile? 1845 return LValue::MakeAddr(RV.getAggregateAddr(), MakeQualifiers(E->getType())); 1846} 1847 1848llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface, 1849 const ObjCIvarDecl *Ivar) { 1850 return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar); 1851} 1852 1853LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy, 1854 llvm::Value *BaseValue, 1855 const ObjCIvarDecl *Ivar, 1856 unsigned CVRQualifiers) { 1857 return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue, 1858 Ivar, CVRQualifiers); 1859} 1860 1861LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) { 1862 // FIXME: A lot of the code below could be shared with EmitMemberExpr. 
1863 llvm::Value *BaseValue = 0; 1864 const Expr *BaseExpr = E->getBase(); 1865 Qualifiers BaseQuals; 1866 QualType ObjectTy; 1867 if (E->isArrow()) { 1868 BaseValue = EmitScalarExpr(BaseExpr); 1869 ObjectTy = BaseExpr->getType()->getPointeeType(); 1870 BaseQuals = ObjectTy.getQualifiers(); 1871 } else { 1872 LValue BaseLV = EmitLValue(BaseExpr); 1873 // FIXME: this isn't right for bitfields. 1874 BaseValue = BaseLV.getAddress(); 1875 ObjectTy = BaseExpr->getType(); 1876 BaseQuals = ObjectTy.getQualifiers(); 1877 } 1878 1879 LValue LV = 1880 EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(), 1881 BaseQuals.getCVRQualifiers()); 1882 setObjCGCLValueClass(getContext(), E, LV); 1883 return LV; 1884} 1885 1886LValue 1887CodeGenFunction::EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E) { 1888 // This is a special l-value that just issues sends when we load or store 1889 // through it. 1890 return LValue::MakePropertyRef(E, E->getType().getCVRQualifiers()); 1891} 1892 1893LValue CodeGenFunction::EmitObjCKVCRefLValue( 1894 const ObjCImplicitSetterGetterRefExpr *E) { 1895 // This is a special l-value that just issues sends when we load or store 1896 // through it. 1897 return LValue::MakeKVCRef(E, E->getType().getCVRQualifiers()); 1898} 1899 1900LValue CodeGenFunction::EmitObjCSuperExprLValue(const ObjCSuperExpr *E) { 1901 return EmitUnsupportedLValue(E, "use of super"); 1902} 1903 1904LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) { 1905 // Can only get l-value for message expression returning aggregate type 1906 RValue RV = EmitAnyExprToTemp(E); 1907 return LValue::MakeAddr(RV.getAggregateAddr(), MakeQualifiers(E->getType())); 1908} 1909 1910RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee, 1911 ReturnValueSlot ReturnValue, 1912 CallExpr::const_arg_iterator ArgBeg, 1913 CallExpr::const_arg_iterator ArgEnd, 1914 const Decl *TargetDecl) { 1915 // Get the actual function type. 
The callee type will always be a pointer to 1916 // function type or a block pointer type. 1917 assert(CalleeType->isFunctionPointerType() && 1918 "Call must have function pointer type!"); 1919 1920 CalleeType = getContext().getCanonicalType(CalleeType); 1921 1922 const FunctionType *FnType 1923 = cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType()); 1924 QualType ResultType = FnType->getResultType(); 1925 1926 CallArgList Args; 1927 EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd); 1928 1929 return EmitCall(CGM.getTypes().getFunctionInfo(Args, FnType), 1930 Callee, ReturnValue, Args, TargetDecl); 1931} 1932 1933LValue CodeGenFunction:: 1934EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) { 1935 llvm::Value *BaseV; 1936 if (E->getOpcode() == BinaryOperator::PtrMemI) 1937 BaseV = EmitScalarExpr(E->getLHS()); 1938 else 1939 BaseV = EmitLValue(E->getLHS()).getAddress(); 1940 const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(getLLVMContext()); 1941 BaseV = Builder.CreateBitCast(BaseV, i8Ty); 1942 llvm::Value *OffsetV = EmitScalarExpr(E->getRHS()); 1943 llvm::Value *AddV = Builder.CreateInBoundsGEP(BaseV, OffsetV, "add.ptr"); 1944 1945 QualType Ty = E->getRHS()->getType(); 1946 Ty = Ty->getAs<MemberPointerType>()->getPointeeType(); 1947 1948 const llvm::Type *PType = ConvertType(getContext().getPointerType(Ty)); 1949 AddV = Builder.CreateBitCast(AddV, PType); 1950 return LValue::MakeAddr(AddV, MakeQualifiers(Ty)); 1951} 1952 1953