CGExpr.cpp revision 52f08bcbb81c750ed62b53ed0b34aff16143b877
//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGCall.h"
#include "CGObjCRuntime.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

//===--------------------------------------------------------------------===//
// Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(const llvm::Type *Ty,
                                                    const llvm::Twine &Name) {
  if (!Builder.isNamePreserving())
    return new llvm::AllocaInst(Ty, 0, "", AllocaInsertPt);
  return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
}

/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  QualType BoolTy = getContext().BoolTy;
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),BoolTy);
}

/// EmitAnyExpr - Emit code to compute the specified expression which can have
/// any type. The result is returned as an RValue struct. If this is an
/// aggregate expression, the aggloc/agglocvolatile arguments indicate where
/// the result should be returned.
RValue CodeGenFunction::EmitAnyExpr(const Expr *E, llvm::Value *AggLoc,
                                    bool IsAggLocVolatile, bool IgnoreResult,
                                    bool IsInitializer) {
  if (!hasAggregateLLVMType(E->getType()))
    return RValue::get(EmitScalarExpr(E, IgnoreResult));
  else if (E->getType()->isAnyComplexType())
    return RValue::getComplex(EmitComplexExpr(E, false, false,
                                              IgnoreResult, IgnoreResult));

  EmitAggExpr(E, AggLoc, IsAggLocVolatile, IgnoreResult, IsInitializer);
  return RValue::getAggregate(AggLoc, IsAggLocVolatile);
}

/// EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E,
                                          bool IsAggLocVolatile,
                                          bool IsInitializer) {
  llvm::Value *AggLoc = 0;

  if (hasAggregateLLVMType(E->getType()) &&
      !E->getType()->isAnyComplexType())
    AggLoc = CreateTempAlloca(ConvertType(E->getType()), "agg.tmp");
  return EmitAnyExpr(E, AggLoc, IsAggLocVolatile, /*IgnoreResult=*/false,
                     IsInitializer);
}

RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
                                                   QualType DestType,
                                                   bool IsInitializer) {
  bool ShouldDestroyTemporaries = false;
  unsigned OldNumLiveTemporaries = 0;

  if (const CXXExprWithTemporaries *TE = dyn_cast<CXXExprWithTemporaries>(E)) {
    ShouldDestroyTemporaries = TE->shouldDestroyTemporaries();

    if (ShouldDestroyTemporaries) {
      // Keep track of the current cleanup stack depth.
      OldNumLiveTemporaries = LiveTemporaries.size();
    }

    E = TE->getSubExpr();
  }

  RValue Val;
  if (E->isLvalue(getContext()) == Expr::LV_Valid) {
    // Emit the expr as an lvalue.
    LValue LV = EmitLValue(E);
    if (LV.isSimple())
      return RValue::get(LV.getAddress());
    Val = EmitLoadOfLValue(LV, E->getType());

    if (ShouldDestroyTemporaries) {
      // Pop temporaries.
      while (LiveTemporaries.size() > OldNumLiveTemporaries)
        PopCXXTemporary();
    }
  } else {
    const CXXRecordDecl *BaseClassDecl = 0;
    const CXXRecordDecl *DerivedClassDecl = 0;

    if (const CastExpr *CE =
          dyn_cast<CastExpr>(E->IgnoreParenNoopCasts(getContext()))) {
      if (CE->getCastKind() == CastExpr::CK_DerivedToBase) {
        E = CE->getSubExpr();

        BaseClassDecl =
          cast<CXXRecordDecl>(CE->getType()->getAs<RecordType>()->getDecl());
        DerivedClassDecl =
          cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl());
      }
    }

    Val = EmitAnyExprToTemp(E, /*IsAggLocVolatile=*/false,
                            IsInitializer);

    if (ShouldDestroyTemporaries) {
      // Pop temporaries.
      while (LiveTemporaries.size() > OldNumLiveTemporaries)
        PopCXXTemporary();
    }

    if (IsInitializer) {
      // We might have to destroy the temporary variable.
      if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
        if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
          if (!ClassDecl->hasTrivialDestructor()) {
            const CXXDestructorDecl *Dtor =
              ClassDecl->getDestructor(getContext());

            CleanupScope scope(*this);
            EmitCXXDestructorCall(Dtor, Dtor_Complete, Val.getAggregateAddr());
          }
        }
      }
    }

    // Check if we need to perform the derived-to-base cast.
    if (BaseClassDecl) {
      llvm::Value *Derived = Val.getAggregateAddr();

      llvm::Value *Base =
        GetAddressCXXOfBaseClass(Derived, DerivedClassDecl, BaseClassDecl,
                                 /*NullCheckValue=*/false);
      return RValue::get(Base);
    }
  }

  if (Val.isAggregate()) {
    Val = RValue::get(Val.getAggregateAddr());
  } else {
    // Create a temporary variable that we can bind the reference to.
    llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(E->getType()),
                                         "reftmp");
    if (Val.isScalar())
      EmitStoreOfScalar(Val.getScalarVal(), Temp, false, E->getType());
    else
      StoreComplexToAddr(Val.getComplexVal(), Temp, false);
    Val = RValue::get(Temp);
  }

  return Val;
}


/// getAccessedFieldNo - Given an encoded value and a result number, return the
/// input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  if (isa<llvm::ConstantAggregateZero>(Elts))
    return 0;

  return cast<llvm::ConstantInt>(Elts->getOperand(Idx))->getZExtValue();
}


//===----------------------------------------------------------------------===//
// LValue Expression Emission
//===----------------------------------------------------------------------===//

RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType()) {
    return RValue::get(0);
  } else if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    const llvm::Type *EltTy = ConvertType(CTy->getElementType());
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  } else if (hasAggregateLLVMType(Ty)) {
    const llvm::Type *LTy = llvm::PointerType::getUnqual(ConvertType(Ty));
    return RValue::getAggregate(llvm::UndefValue::get(LTy));
  } else {
    return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
  }
}

RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}

LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
  return LValue::MakeAddr(llvm::UndefValue::get(Ty),
                          MakeQualifiers(E->getType()));
}

/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield reference.
/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
/// an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of the
/// LLVM value is known: For example, it may not be a pointer to an integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed size,
/// this method guarantees that the returned pointer type will point to an LLVM
/// type of the same size as the lvalue's type. If the lvalue has a variable
/// length type, this is not possible.
///
LValue CodeGenFunction::EmitLValue(const Expr *E) {
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ParenExprClass:return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));

  case Expr::BlockDeclRefExprClass:
    return EmitBlockDeclRefLValue(cast<BlockDeclRefExpr>(E));

  case Expr::CXXConditionDeclExprClass:
    return EmitCXXConditionDeclLValue(cast<CXXConditionDeclExpr>(E));
  case Expr::CXXTemporaryObjectExprClass:
  case Expr::CXXConstructExprClass:
    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
  case Expr::CXXBindTemporaryExprClass:
    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
  case Expr::CXXExprWithTemporariesClass:
    return EmitCXXExprWithTemporariesLValue(cast<CXXExprWithTemporaries>(E));

  case Expr::ObjCMessageExprClass:
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
  case Expr::ObjCIvarRefExprClass:
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
  case Expr::ObjCPropertyRefExprClass:
    return EmitObjCPropertyRefLValue(cast<ObjCPropertyRefExpr>(E));
  case Expr::ObjCImplicitSetterGetterRefExprClass:
    return EmitObjCKVCRefLValue(cast<ObjCImplicitSetterGetterRefExpr>(E));
  case Expr::ObjCSuperExprClass:
    return EmitObjCSuperExprLValue(cast<ObjCSuperExpr>(E));

  case Expr::StmtExprClass:
    return EmitStmtExprLValue(cast<StmtExpr>(E));
  case Expr::UnaryOperatorClass:
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
  case Expr::ArraySubscriptExprClass:
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
  case Expr::ExtVectorElementExprClass:
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
  case Expr::MemberExprClass:
    return EmitMemberExpr(cast<MemberExpr>(E));
  case Expr::CompoundLiteralExprClass:
    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));
  case Expr::CXXZeroInitValueExprClass:
    return EmitNullInitializationLValue(cast<CXXZeroInitValueExpr>(E));
  }
}

llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
                                               QualType Ty) {
  llvm::Value *V = Builder.CreateLoad(Addr, Volatile, "tmp");

  // Bool can have different representation in memory than in registers.
  if (Ty->isBooleanType())
    if (V->getType() != llvm::Type::getInt1Ty(VMContext))
      V = Builder.CreateTrunc(V, llvm::Type::getInt1Ty(VMContext), "tobool");

  return V;
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
                                        bool Volatile, QualType Ty) {

  if (Ty->isBooleanType()) {
    // Bool can have different representation in memory than in registers.
    const llvm::Type *SrcTy = Value->getType();
    const llvm::PointerType *DstPtr = cast<llvm::PointerType>(Addr->getType());
    if (DstPtr->getElementType() != SrcTy) {
      const llvm::Type *MemTy =
        llvm::PointerType::get(SrcTy, DstPtr->getAddressSpace());
      Addr = Builder.CreateBitCast(Addr, MemTy, "storetmp");
    }
  }
  Builder.CreateStore(Value, Addr, Volatile);
}

/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, QualType ExprType) {
  if (LV.isObjCWeak()) {
    // load of a __weak object.
    llvm::Value *AddrWeakObj = LV.getAddress();
    llvm::Value *read_weak = CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                                   AddrWeakObj);
    return RValue::get(read_weak);
  }

  if (LV.isSimple()) {
    llvm::Value *Ptr = LV.getAddress();
    const llvm::Type *EltTy =
      cast<llvm::PointerType>(Ptr->getType())->getElementType();

    // Simple scalar l-value.
    if (EltTy->isSingleValueType())
      return RValue::get(EmitLoadOfScalar(Ptr, LV.isVolatileQualified(),
                                          ExprType));

    assert(ExprType->isFunctionType() && "Unknown scalar value");
    return RValue::get(Ptr);
  }

  if (LV.isVectorElt()) {
    llvm::Value *Vec = Builder.CreateLoad(LV.getVectorAddr(),
                                          LV.isVolatileQualified(), "tmp");
    return RValue::get(Builder.CreateExtractElement(Vec, LV.getVectorIdx(),
                                                    "vecext"));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
  if (LV.isExtVectorElt())
    return EmitLoadOfExtVectorElementLValue(LV, ExprType);

  if (LV.isBitfield())
    return EmitLoadOfBitfieldLValue(LV, ExprType);

  if (LV.isPropertyRef())
    return EmitLoadOfPropertyRefLValue(LV, ExprType);

  assert(LV.isKVCRef() && "Unknown LValue type!");
  return EmitLoadOfKVCRefLValue(LV, ExprType);
}

RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
                                                 QualType ExprType) {
  unsigned StartBit = LV.getBitfieldStartBit();
  unsigned BitfieldSize = LV.getBitfieldSize();
  llvm::Value *Ptr = LV.getBitfieldAddr();

  const llvm::Type *EltTy =
    cast<llvm::PointerType>(Ptr->getType())->getElementType();
  unsigned EltTySize = CGM.getTargetData().getTypeSizeInBits(EltTy);

  // In some cases the bitfield may straddle two memory locations. Currently we
  // load the entire bitfield, then do the magic to sign-extend it if
  // necessary. This results in somewhat more code than necessary for the common
  // case (one load), since two shifts accomplish both the masking and sign
  // extension.
  unsigned LowBits = std::min(BitfieldSize, EltTySize - StartBit);
  llvm::Value *Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "tmp");

  // Shift to proper location.
  if (StartBit)
    Val = Builder.CreateLShr(Val, llvm::ConstantInt::get(EltTy, StartBit),
                             "bf.lo");

  // Mask off unused bits.
  llvm::Constant *LowMask = llvm::ConstantInt::get(VMContext,
                                llvm::APInt::getLowBitsSet(EltTySize, LowBits));
  Val = Builder.CreateAnd(Val, LowMask, "bf.lo.cleared");

  // Fetch the high bits if necessary.
  if (LowBits < BitfieldSize) {
    unsigned HighBits = BitfieldSize - LowBits;
    llvm::Value *HighPtr = Builder.CreateGEP(Ptr, llvm::ConstantInt::get(
                            llvm::Type::getInt32Ty(VMContext), 1), "bf.ptr.hi");
    llvm::Value *HighVal = Builder.CreateLoad(HighPtr,
                                              LV.isVolatileQualified(),
                                              "tmp");

    // Mask off unused bits.
    llvm::Constant *HighMask = llvm::ConstantInt::get(VMContext,
                               llvm::APInt::getLowBitsSet(EltTySize, HighBits));
    HighVal = Builder.CreateAnd(HighVal, HighMask, "bf.lo.cleared");

    // Shift to proper location and or in to bitfield value.
    HighVal = Builder.CreateShl(HighVal,
                                llvm::ConstantInt::get(EltTy, LowBits));
    Val = Builder.CreateOr(Val, HighVal, "bf.val");
  }

  // Sign extend if necessary.
  if (LV.isBitfieldSigned()) {
    llvm::Value *ExtraBits = llvm::ConstantInt::get(EltTy,
                                                    EltTySize - BitfieldSize);
    Val = Builder.CreateAShr(Builder.CreateShl(Val, ExtraBits),
                             ExtraBits, "bf.val.sext");
  }

  // The bitfield type and the normal type differ when the storage sizes differ
  // (currently just _Bool).
  Val = Builder.CreateIntCast(Val, ConvertType(ExprType), false, "tmp");

  return RValue::get(Val);
}

RValue CodeGenFunction::EmitLoadOfPropertyRefLValue(LValue LV,
                                                    QualType ExprType) {
  return EmitObjCPropertyGet(LV.getPropertyRefExpr());
}

RValue CodeGenFunction::EmitLoadOfKVCRefLValue(LValue LV,
                                               QualType ExprType) {
  return EmitObjCPropertyGet(LV.getKVCRefExpr());
}

// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
                                                         QualType ExprType) {
  llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddr(),
                                        LV.isVolatileQualified(), "tmp");

  const llvm::Constant *Elts = LV.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be extracting
  // a single element. Just codegen as an extractelement.
  const VectorType *ExprVT = ExprType->getAs<VectorType>();
  if (!ExprVT) {
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(
                                     llvm::Type::getInt32Ty(VMContext), InIdx);
    return RValue::get(Builder.CreateExtractElement(Vec, Elt, "tmp"));
  }

  // Always use shuffle vector to try to retain the original program structure
  unsigned NumResultElts = ExprVT->getNumElements();

  llvm::SmallVector<llvm::Constant*, 4> Mask;
  for (unsigned i = 0; i != NumResultElts; ++i) {
    unsigned InIdx = getAccessedFieldNo(i, Elts);
    Mask.push_back(llvm::ConstantInt::get(
                                     llvm::Type::getInt32Ty(VMContext), InIdx));
  }

  llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
  Vec = Builder.CreateShuffleVector(Vec,
                                    llvm::UndefValue::get(Vec->getType()),
                                    MaskV, "tmp");
  return RValue::get(Vec);
}



/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
                                             QualType Ty) {
  if (!Dst.isSimple()) {
    if (Dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element.
      llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddr(),
                                            Dst.isVolatileQualified(), "tmp");
      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
                                        Dst.getVectorIdx(), "vecins");
      Builder.CreateStore(Vec, Dst.getVectorAddr(),Dst.isVolatileQualified());
      return;
    }

    // If this is an update of extended vector elements, insert them as
    // appropriate.
    if (Dst.isExtVectorElt())
      return EmitStoreThroughExtVectorComponentLValue(Src, Dst, Ty);

    if (Dst.isBitfield())
      return EmitStoreThroughBitfieldLValue(Src, Dst, Ty);

    if (Dst.isPropertyRef())
      return EmitStoreThroughPropertyRefLValue(Src, Dst, Ty);

    if (Dst.isKVCRef())
      return EmitStoreThroughKVCRefLValue(Src, Dst, Ty);

    assert(0 && "Unknown LValue type");
  }

  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // load of a __weak object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
    return;
  }

  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // load of a __strong object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    if (Dst.isObjCIvar()) {
      assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
      const llvm::Type *ResultType = ConvertType(getContext().LongTy);
      llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
      llvm::Value *dst = RHS;
      RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
      llvm::Value *LHS =
        Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
      llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
      CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
                                              BytesBetween);
    }
    else if (Dst.isGlobalObjCRef())
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst);
    else
      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
    return;
  }

  assert(Src.isScalar() && "Can't emit an agg store with this method");
  EmitStoreOfScalar(Src.getScalarVal(), Dst.getAddress(),
                    Dst.isVolatileQualified(), Ty);
}

void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                                     QualType Ty,
                                                     llvm::Value **Result) {
  unsigned StartBit = Dst.getBitfieldStartBit();
  unsigned BitfieldSize = Dst.getBitfieldSize();
  llvm::Value *Ptr = Dst.getBitfieldAddr();

  const llvm::Type *EltTy =
    cast<llvm::PointerType>(Ptr->getType())->getElementType();
  unsigned EltTySize = CGM.getTargetData().getTypeSizeInBits(EltTy);

  // Get the new value, cast to the appropriate type and masked to exactly the
  // size of the bit-field.
  llvm::Value *SrcVal = Src.getScalarVal();
  llvm::Value *NewVal = Builder.CreateIntCast(SrcVal, EltTy, false, "tmp");
  llvm::Constant *Mask = llvm::ConstantInt::get(VMContext,
                          llvm::APInt::getLowBitsSet(EltTySize, BitfieldSize));
  NewVal = Builder.CreateAnd(NewVal, Mask, "bf.value");

  // Return the new value of the bit-field, if requested.
  if (Result) {
    // Cast back to the proper type for result.
    const llvm::Type *SrcTy = SrcVal->getType();
    llvm::Value *SrcTrunc = Builder.CreateIntCast(NewVal, SrcTy, false,
                                                  "bf.reload.val");

    // Sign extend if necessary.
    if (Dst.isBitfieldSigned()) {
      unsigned SrcTySize = CGM.getTargetData().getTypeSizeInBits(SrcTy);
      llvm::Value *ExtraBits = llvm::ConstantInt::get(SrcTy,
                                                      SrcTySize - BitfieldSize);
      SrcTrunc = Builder.CreateAShr(Builder.CreateShl(SrcTrunc, ExtraBits),
                                    ExtraBits, "bf.reload.sext");
    }

    *Result = SrcTrunc;
  }

  // In some cases the bitfield may straddle two memory locations. Emit the low
  // part first and check to see if the high needs to be done.
  unsigned LowBits = std::min(BitfieldSize, EltTySize - StartBit);
  llvm::Value *LowVal = Builder.CreateLoad(Ptr, Dst.isVolatileQualified(),
                                           "bf.prev.low");

  // Compute the mask for zero-ing the low part of this bitfield.
  llvm::Constant *InvMask =
    llvm::ConstantInt::get(VMContext,
             ~llvm::APInt::getBitsSet(EltTySize, StartBit, StartBit + LowBits));

  // Compute the new low part as
  //   LowVal = (LowVal & InvMask) | (NewVal << StartBit),
  // with the shift of NewVal implicitly stripping the high bits.
  llvm::Value *NewLowVal =
    Builder.CreateShl(NewVal, llvm::ConstantInt::get(EltTy, StartBit),
                      "bf.value.lo");
  LowVal = Builder.CreateAnd(LowVal, InvMask, "bf.prev.lo.cleared");
  LowVal = Builder.CreateOr(LowVal, NewLowVal, "bf.new.lo");

  // Write back.
  Builder.CreateStore(LowVal, Ptr, Dst.isVolatileQualified());

  // If the low part doesn't cover the bitfield emit a high part.
  if (LowBits < BitfieldSize) {
    unsigned HighBits = BitfieldSize - LowBits;
    llvm::Value *HighPtr = Builder.CreateGEP(Ptr, llvm::ConstantInt::get(
                            llvm::Type::getInt32Ty(VMContext), 1), "bf.ptr.hi");
    llvm::Value *HighVal = Builder.CreateLoad(HighPtr,
                                              Dst.isVolatileQualified(),
                                              "bf.prev.hi");

    // Compute the mask for zero-ing the high part of this bitfield.
    llvm::Constant *InvMask =
      llvm::ConstantInt::get(VMContext, ~llvm::APInt::getLowBitsSet(EltTySize,
                                                                    HighBits));

    // Compute the new high part as
    //   HighVal = (HighVal & InvMask) | (NewVal lshr LowBits),
    // where the high bits of NewVal have already been cleared and the
    // shift stripping the low bits.
    llvm::Value *NewHighVal =
      Builder.CreateLShr(NewVal, llvm::ConstantInt::get(EltTy, LowBits),
                         "bf.value.high");
    HighVal = Builder.CreateAnd(HighVal, InvMask, "bf.prev.hi.cleared");
    HighVal = Builder.CreateOr(HighVal, NewHighVal, "bf.new.hi");

    // Write back.
    Builder.CreateStore(HighVal, HighPtr, Dst.isVolatileQualified());
  }
}

void CodeGenFunction::EmitStoreThroughPropertyRefLValue(RValue Src,
                                                        LValue Dst,
                                                        QualType Ty) {
  EmitObjCPropertySet(Dst.getPropertyRefExpr(), Src);
}

void CodeGenFunction::EmitStoreThroughKVCRefLValue(RValue Src,
                                                   LValue Dst,
                                                   QualType Ty) {
  EmitObjCPropertySet(Dst.getKVCRefExpr(), Src);
}

void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
                                                               LValue Dst,
                                                               QualType Ty) {
  // This access turns into a read/modify/write of the vector. Load the input
  // value now.
  llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddr(),
                                        Dst.isVolatileQualified(), "tmp");
  const llvm::Constant *Elts = Dst.getExtVectorElts();

  llvm::Value *SrcVal = Src.getScalarVal();

  if (const VectorType *VTy = Ty->getAs<VectorType>()) {
    unsigned NumSrcElts = VTy->getNumElements();
    unsigned NumDstElts =
      cast<llvm::VectorType>(Vec->getType())->getNumElements();
    if (NumDstElts == NumSrcElts) {
      // Use shuffle vector if the src and destination are the same number of
      // elements and restore the vector mask since it is on the side it will be
      // stored.
      llvm::SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
      for (unsigned i = 0; i != NumSrcElts; ++i) {
        unsigned InIdx = getAccessedFieldNo(i, Elts);
        Mask[InIdx] = llvm::ConstantInt::get(
                                         llvm::Type::getInt32Ty(VMContext), i);
      }

      llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
      Vec = Builder.CreateShuffleVector(SrcVal,
                                        llvm::UndefValue::get(Vec->getType()),
                                        MaskV, "tmp");
    } else if (NumDstElts > NumSrcElts) {
      // Extend the source vector to the same length and then shuffle it
      // into the destination.
      // FIXME: since we're shuffling with undef, can we just use the indices
      // into that? This could be simpler.
      llvm::SmallVector<llvm::Constant*, 4> ExtMask;
      unsigned i;
      for (i = 0; i != NumSrcElts; ++i)
        ExtMask.push_back(llvm::ConstantInt::get(
                                        llvm::Type::getInt32Ty(VMContext), i));
      for (; i != NumDstElts; ++i)
        ExtMask.push_back(llvm::UndefValue::get(
                                           llvm::Type::getInt32Ty(VMContext)));
      llvm::Value *ExtMaskV = llvm::ConstantVector::get(&ExtMask[0],
                                                        ExtMask.size());
      llvm::Value *ExtSrcVal =
        Builder.CreateShuffleVector(SrcVal,
                                    llvm::UndefValue::get(SrcVal->getType()),
                                    ExtMaskV, "tmp");
      // build identity
      llvm::SmallVector<llvm::Constant*, 4> Mask;
      for (unsigned i = 0; i != NumDstElts; ++i) {
        Mask.push_back(llvm::ConstantInt::get(
                                         llvm::Type::getInt32Ty(VMContext), i));
      }
      // modify when what gets shuffled in
      for (unsigned i = 0; i != NumSrcElts; ++i) {
        unsigned Idx = getAccessedFieldNo(i, Elts);
        Mask[Idx] = llvm::ConstantInt::get(
                               llvm::Type::getInt32Ty(VMContext), i+NumDstElts);
      }
      llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
      Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV, "tmp");
    } else {
      // We should never shorten the vector
      assert(0 && "unexpected shorten vector length");
    }
  } else {
    // If the Src is a scalar (not a vector) it must be updating one element.
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(
                                     llvm::Type::getInt32Ty(VMContext), InIdx);
    Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt, "tmp");
  }

  Builder.CreateStore(Vec, Dst.getExtVectorAddr(), Dst.isVolatileQualified());
}

// setObjCGCLValueClass - sets the class of the lvalue for the purpose of
// generating the write-barrier API. It is currently a global, ivar,
// or neither.
static
void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E, LValue &LV) {
  if (Ctx.getLangOptions().getGCMode() == LangOptions::NonGC)
    return;

  if (isa<ObjCIvarRefExpr>(E)) {
    LV.SetObjCIvar(LV, true);
    ObjCIvarRefExpr *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr*>(E));
    LV.setBaseIvarExp(Exp->getBase());
    LV.SetObjCArray(LV, E->getType()->isArrayType());
    return;
  }
  if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
      if ((VD->isBlockVarDecl() && !VD->hasLocalStorage()) ||
          VD->isFileVarDecl())
        LV.SetGlobalObjCRef(LV, true);
    }
    LV.SetObjCArray(LV, E->getType()->isArrayType());
  }
  else if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E))
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
  else if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
    if (LV.isObjCIvar()) {
      // If cast is to a structure pointer, follow gcc's behavior and make it
      // a non-ivar write-barrier.
      QualType ExpTy = E->getType();
      if (ExpTy->isPointerType())
        ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType())
        LV.SetObjCIvar(LV, false);
    }
  }
  else if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E))
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
  else if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E))
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
  else if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
    if (LV.isObjCIvar() && !LV.isObjCArray())
      // Using array syntax to assign to what an ivar points to is not the
      // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
      LV.SetObjCIvar(LV, false);
    else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
      // Using array syntax to assign to what a global points to is not the
      // same as assigning to the global itself. {id *G;} G[i] = 0;
      LV.SetGlobalObjCRef(LV, false);
  }
  else if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
    // We don't know if member is an 'ivar', but this flag is looked at
    // only in the context of LV.isObjCIvar().
    LV.SetObjCArray(LV, E->getType()->isArrayType());
  }
}

LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
  const VarDecl *VD = dyn_cast<VarDecl>(E->getDecl());

  if (VD && (VD->isBlockVarDecl() || isa<ParmVarDecl>(VD) ||
             isa<ImplicitParamDecl>(VD))) {
    LValue LV;
    bool NonGCable = VD->hasLocalStorage() &&
      !VD->hasAttr<BlocksAttr>();
    if (VD->hasExternalStorage()) {
      llvm::Value *V = CGM.GetAddrOfGlobalVar(VD);
      if (VD->getType()->isReferenceType())
        V = Builder.CreateLoad(V, "tmp");
      LV = LValue::MakeAddr(V, MakeQualifiers(E->getType()));
    } else {
      llvm::Value *V = LocalDeclMap[VD];
      assert(V && "DeclRefExpr not entered in LocalDeclMap?");

      Qualifiers Quals = MakeQualifiers(E->getType());
      // local variables do not get their gc attribute set.
      // local static?
      if (NonGCable) Quals.removeObjCGCAttr();

      if (VD->hasAttr<BlocksAttr>()) {
        V = Builder.CreateStructGEP(V, 1, "forwarding");
        V = Builder.CreateLoad(V, false);
        V = Builder.CreateStructGEP(V, getByRefValueLLVMField(VD),
                                    VD->getNameAsString());
      }
      if (VD->getType()->isReferenceType())
        V = Builder.CreateLoad(V, "tmp");
      LV = LValue::MakeAddr(V, Quals);
    }
    LValue::SetObjCNonGC(LV, NonGCable);
    setObjCGCLValueClass(getContext(), E, LV);
    return LV;
  } else if (VD && VD->isFileVarDecl()) {
    llvm::Value *V = CGM.GetAddrOfGlobalVar(VD);
    if (VD->getType()->isReferenceType())
      V = Builder.CreateLoad(V, "tmp");
    LValue LV = LValue::MakeAddr(V, MakeQualifiers(E->getType()));
    setObjCGCLValueClass(getContext(), E, LV);
    return LV;
  } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(E->getDecl())) {
    llvm::Value* V = CGM.GetAddrOfFunction(FD);
    if (!FD->hasPrototype()) {
      if (const FunctionProtoType *Proto =
              FD->getType()->getAs<FunctionProtoType>()) {
        // Ugly case: for a K&R-style definition, the type of the definition
        // isn't the same as the type of a use. Correct for this with a
        // bitcast.
        QualType NoProtoType =
          getContext().getFunctionNoProtoType(Proto->getResultType());
        NoProtoType = getContext().getPointerType(NoProtoType);
        V = Builder.CreateBitCast(V, ConvertType(NoProtoType), "tmp");
      }
    }
    return LValue::MakeAddr(V, MakeQualifiers(E->getType()));
  } else if (const ImplicitParamDecl *IPD =
                 dyn_cast<ImplicitParamDecl>(E->getDecl())) {
    llvm::Value *V = LocalDeclMap[IPD];
    assert(V && "BlockVarDecl not entered in LocalDeclMap?");
    return LValue::MakeAddr(V, MakeQualifiers(E->getType()));
  } else if (E->getQualifier()) {
    // FIXME: the qualifier check does not seem sufficient here
    return EmitPointerToDataMemberLValue(E);
  }
  assert(0 && "Unimp declref");
  // an invalid LValue, but the assert will
  // ensure that this point is never reached.
  return LValue();
}

LValue CodeGenFunction::EmitBlockDeclRefLValue(const BlockDeclRefExpr *E) {
  return LValue::MakeAddr(GetAddrOfBlockDecl(E), MakeQualifiers(E->getType()));
}

LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
  // __extension__ doesn't affect lvalue-ness.
  if (E->getOpcode() == UnaryOperator::Extension)
    return EmitLValue(E->getSubExpr());

  QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
  switch (E->getOpcode()) {
  default: assert(0 && "Unknown unary operator lvalue!");
  case UnaryOperator::Deref:
    {
      QualType T = E->getSubExpr()->getType()->getPointeeType();
      assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");

      Qualifiers Quals = MakeQualifiers(T);
      Quals.setAddressSpace(ExprTy.getAddressSpace());

      LValue LV = LValue::MakeAddr(EmitScalarExpr(E->getSubExpr()), Quals);
      // We should not generate __weak write barrier on indirect reference
      // of a pointer to object; as in void foo (__weak id *param); *param = 0;
      // But, we continue to generate __strong write barrier on indirect write
      // into a pointer to object.
      if (getContext().getLangOptions().ObjC1 &&
          getContext().getLangOptions().getGCMode() != LangOptions::NonGC &&
          LV.isObjCWeak())
        LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate(getContext()));
      return LV;
    }
  case UnaryOperator::Real:
  case UnaryOperator::Imag:
    LValue LV = EmitLValue(E->getSubExpr());
    unsigned Idx = E->getOpcode() == UnaryOperator::Imag;
    return LValue::MakeAddr(Builder.CreateStructGEP(LV.getAddress(),
                                                    Idx, "idx"),
                            MakeQualifiers(ExprTy));
  }
}

LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
  return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromLiteral(E),
                          Qualifiers());
}

LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
  return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromObjCEncode(E),
                          Qualifiers());
}


LValue CodeGenFunction::EmitPredefinedFunctionName(unsigned Type) {
  std::string GlobalVarName;

  switch (Type) {
  default:
    assert(0 && "Invalid type");
  case PredefinedExpr::Func:
    GlobalVarName = "__func__.";
    break;
  case PredefinedExpr::Function:
    GlobalVarName = "__FUNCTION__.";
    break;
  case PredefinedExpr::PrettyFunction:
    GlobalVarName = "__PRETTY_FUNCTION__.";
    break;
  }

  llvm::StringRef FnName = CurFn->getName();
  if (FnName.startswith("\01"))
    FnName = FnName.substr(1);
  GlobalVarName += FnName;

  std::string FunctionName =
    PredefinedExpr::ComputeName(getContext(), (PredefinedExpr::IdentType)Type,
                                CurCodeDecl);

  llvm::Constant *C =
    CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str());
  return LValue::MakeAddr(C, Qualifiers());
}

LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
  switch (E->getIdentType()) {
  default:
    return EmitUnsupportedLValue(E, "predefined expression");
  case PredefinedExpr::Func:
  case PredefinedExpr::Function:
  case PredefinedExpr::PrettyFunction:
    return EmitPredefinedFunctionName(E->getIdentType());
  }
}

LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
  // The index must always be an integer, which is not an aggregate. Emit it.
  llvm::Value *Idx = EmitScalarExpr(E->getIdx());
  QualType IdxTy = E->getIdx()->getType();
  bool IdxSigned = IdxTy->isSignedIntegerType();

  // If the base is a vector type, then we are forming a vector element lvalue
  // with this subscript.
  if (E->getBase()->getType()->isVectorType()) {
    // Emit the vector as an lvalue to get its address.
    LValue LHS = EmitLValue(E->getBase());
    assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
    Idx = Builder.CreateIntCast(Idx,
                          llvm::Type::getInt32Ty(VMContext), IdxSigned, "vidx");
    return LValue::MakeVectorElt(LHS.getAddress(), Idx,
                                 E->getBase()->getType().getCVRQualifiers());
  }

  // The base must be a pointer, which is not an aggregate. Emit it.
  llvm::Value *Base = EmitScalarExpr(E->getBase());

  // Extend or truncate the index type to 32 or 64-bits.
  unsigned IdxBitwidth = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
  if (IdxBitwidth != LLVMPointerWidth)
    Idx = Builder.CreateIntCast(Idx,
                            llvm::IntegerType::get(VMContext, LLVMPointerWidth),
                                IdxSigned, "idxprom");

  // We know that the pointer points to a type of the correct size, unless the
  // size is a VLA or Objective-C interface.
  llvm::Value *Address = 0;
  if (const VariableArrayType *VAT =
        getContext().getAsVariableArrayType(E->getType())) {
    llvm::Value *VLASize = GetVLASize(VAT);

    Idx = Builder.CreateMul(Idx, VLASize);

    QualType BaseType = getContext().getBaseElementType(VAT);

    uint64_t BaseTypeSize = getContext().getTypeSize(BaseType) / 8;
    Idx = Builder.CreateUDiv(Idx,
                             llvm::ConstantInt::get(Idx->getType(),
                                                    BaseTypeSize));
    Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
  } else if (const ObjCInterfaceType *OIT =
               dyn_cast<ObjCInterfaceType>(E->getType())) {
    llvm::Value *InterfaceSize =
      llvm::ConstantInt::get(Idx->getType(),
                             getContext().getTypeSize(OIT) / 8);

    Idx = Builder.CreateMul(Idx, InterfaceSize);

    const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
    Address = Builder.CreateGEP(Builder.CreateBitCast(Base, i8PTy),
                                Idx, "arrayidx");
    Address = Builder.CreateBitCast(Address, Base->getType());
  } else {
    Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
  }

  QualType T = E->getBase()->getType()->getPointeeType();
  assert(!T.isNull() &&
         "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");

  Qualifiers Quals = MakeQualifiers(T);
  Quals.setAddressSpace(E->getBase()->getType().getAddressSpace());

  LValue LV = LValue::MakeAddr(Address, Quals);
  if (getContext().getLangOptions().ObjC1 &&
      getContext().getLangOptions().getGCMode() != LangOptions::NonGC) {
    LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate(getContext()));
    setObjCGCLValueClass(getContext(), E, LV);
  }
  return LV;
}

static
llvm::Constant *GenerateConstantVector(llvm::LLVMContext &VMContext,
                                       llvm::SmallVector<unsigned, 4> &Elts) {
  llvm::SmallVector<llvm::Constant *, 4> CElts;

  for (unsigned i = 0, e = Elts.size(); i != e; ++i)
    CElts.push_back(llvm::ConstantInt::get(
                                   llvm::Type::getInt32Ty(VMContext), Elts[i]));

  return llvm::ConstantVector::get(&CElts[0], CElts.size());
}

LValue CodeGenFunction::
EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
  // Emit the base vector as an l-value.
  LValue Base;

  // ExtVectorElementExpr's base can either be a vector or pointer to vector.
  if (!E->isArrow()) {
    assert(E->getBase()->getType()->isVectorType());
    Base = EmitLValue(E->getBase());
  } else {
    const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
    llvm::Value *Ptr = EmitScalarExpr(E->getBase());
    Qualifiers Quals = MakeQualifiers(PT->getPointeeType());
    Quals.removeObjCGCAttr();
    Base = LValue::MakeAddr(Ptr, Quals);
  }

  // Encode the element access list into a vector of unsigned indices.
  llvm::SmallVector<unsigned, 4> Indices;
  E->getEncodedElementAccess(Indices);

  if (Base.isSimple()) {
    llvm::Constant *CV = GenerateConstantVector(VMContext, Indices);
    return LValue::MakeExtVectorElt(Base.getAddress(), CV,
                                    Base.getVRQualifiers());
  }
  assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");

  llvm::Constant *BaseElts = Base.getExtVectorElts();
  llvm::SmallVector<llvm::Constant *, 4> CElts;

  for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
    if (isa<llvm::ConstantAggregateZero>(BaseElts))
      CElts.push_back(llvm::ConstantInt::get(
                                         llvm::Type::getInt32Ty(VMContext), 0));
    else
      CElts.push_back(BaseElts->getOperand(Indices[i]));
  }
  llvm::Constant *CV = llvm::ConstantVector::get(&CElts[0], CElts.size());
  return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV,
                                  Base.getVRQualifiers());
}

LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
  bool isUnion = false;
  bool isNonGC = false;
  Expr *BaseExpr = E->getBase();
  llvm::Value *BaseValue = NULL;
  Qualifiers BaseQuals;

  // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
  if (E->isArrow()) {
    BaseValue = EmitScalarExpr(BaseExpr);
    const PointerType *PTy =
      BaseExpr->getType()->getAs<PointerType>();
    if (PTy->getPointeeType()->isUnionType())
      isUnion = true;
    BaseQuals = PTy->getPointeeType().getQualifiers();
  } else if (isa<ObjCPropertyRefExpr>(BaseExpr->IgnoreParens()) ||
             isa<ObjCImplicitSetterGetterRefExpr>(
               BaseExpr->IgnoreParens())) {
    RValue RV = EmitObjCPropertyGet(BaseExpr);
    BaseValue = RV.getAggregateAddr();
    if (BaseExpr->getType()->isUnionType())
      isUnion = true;
    BaseQuals = BaseExpr->getType().getQualifiers();
  } else {
    LValue BaseLV = EmitLValue(BaseExpr);
    if (BaseLV.isNonGC())
      isNonGC = true;
    // FIXME: this isn't right for bitfields.
    BaseValue = BaseLV.getAddress();
    QualType BaseTy = BaseExpr->getType();
    if (BaseTy->isUnionType())
      isUnion = true;
    BaseQuals = BaseTy.getQualifiers();
  }

  FieldDecl *Field = dyn_cast<FieldDecl>(E->getMemberDecl());
  // FIXME: Handle non-field member expressions
  assert(Field && "No code generation for non-field member references");
  LValue MemExpLV = EmitLValueForField(BaseValue, Field, isUnion,
                                       BaseQuals.getCVRQualifiers());
  LValue::SetObjCNonGC(MemExpLV, isNonGC);
  setObjCGCLValueClass(getContext(), E, MemExpLV);
  return MemExpLV;
}

LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value* BaseValue,
                                              FieldDecl* Field,
                                              unsigned CVRQualifiers) {
  CodeGenTypes::BitFieldInfo Info = CGM.getTypes().getBitFieldInfo(Field);

  // FIXME: CodeGenTypes should expose a method to get the appropriate type for
  // FieldTy (the appropriate type is ABI-dependent).
  const llvm::Type *FieldTy =
    CGM.getTypes().ConvertTypeForMem(Field->getType());
  const llvm::PointerType *BaseTy =
    cast<llvm::PointerType>(BaseValue->getType());
  unsigned AS = BaseTy->getAddressSpace();
  BaseValue = Builder.CreateBitCast(BaseValue,
                                    llvm::PointerType::get(FieldTy, AS),
                                    "tmp");

  llvm::Value *Idx =
    llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Info.FieldNo);
  llvm::Value *V = Builder.CreateGEP(BaseValue, Idx, "tmp");

  return LValue::MakeBitfield(V, Info.Start, Info.Size,
                              Field->getType()->isSignedIntegerType(),
                            Field->getType().getCVRQualifiers()|CVRQualifiers);
}

LValue CodeGenFunction::EmitLValueForField(llvm::Value* BaseValue,
                                           FieldDecl* Field,
                                           bool isUnion,
                                           unsigned CVRQualifiers) {
  if (Field->isBitField())
    return EmitLValueForBitfield(BaseValue, Field, CVRQualifiers);

  unsigned idx = CGM.getTypes().getLLVMFieldNo(Field);
  llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx, "tmp");

  // Match union field type.
  if (isUnion) {
    const llvm::Type *FieldTy =
      CGM.getTypes().ConvertTypeForMem(Field->getType());
    const llvm::PointerType * BaseTy =
      cast<llvm::PointerType>(BaseValue->getType());
    unsigned AS = BaseTy->getAddressSpace();
    V = Builder.CreateBitCast(V,
                              llvm::PointerType::get(FieldTy, AS),
                              "tmp");
  }
  if (Field->getType()->isReferenceType())
    V = Builder.CreateLoad(V, "tmp");

  Qualifiers Quals = MakeQualifiers(Field->getType());
  Quals.addCVRQualifiers(CVRQualifiers);
  // __weak attribute on a field is ignored.
  if (Quals.getObjCGCAttr() == Qualifiers::Weak)
    Quals.removeObjCGCAttr();

  return LValue::MakeAddr(V, Quals);
}

LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr* E){
  const llvm::Type *LTy = ConvertType(E->getType());
  llvm::Value *DeclPtr = CreateTempAlloca(LTy, ".compoundliteral");

  const Expr* InitExpr = E->getInitializer();
  LValue Result = LValue::MakeAddr(DeclPtr, MakeQualifiers(E->getType()));

  if (E->getType()->isComplexType()) {
    EmitComplexExprIntoAddr(InitExpr, DeclPtr, false);
  } else if (hasAggregateLLVMType(E->getType())) {
    EmitAnyExpr(InitExpr, DeclPtr, false);
  } else {
    EmitStoreThroughLValue(EmitAnyExpr(InitExpr), Result, E->getType());
  }

  return Result;
}

LValue
CodeGenFunction::EmitConditionalOperatorLValue(const ConditionalOperator* E) {
  if (E->isLvalue(getContext()) == Expr::LV_Valid) {
    llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
    llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
    llvm::BasicBlock *ContBlock = createBasicBlock("cond.end");

    llvm::Value *Cond = EvaluateExprAsBool(E->getCond());
    Builder.CreateCondBr(Cond, LHSBlock, RHSBlock);

    EmitBlock(LHSBlock);

    LValue LHS = EmitLValue(E->getLHS());
    if (!LHS.isSimple())
      return EmitUnsupportedLValue(E, "conditional operator");

    llvm::Value *Temp = CreateTempAlloca(LHS.getAddress()->getType(),
                                         "condtmp");

    Builder.CreateStore(LHS.getAddress(), Temp);
    EmitBranch(ContBlock);

    EmitBlock(RHSBlock);
    LValue RHS = EmitLValue(E->getRHS());
    if (!RHS.isSimple())
      return EmitUnsupportedLValue(E, "conditional operator");

    Builder.CreateStore(RHS.getAddress(), Temp);
    EmitBranch(ContBlock);

    EmitBlock(ContBlock);

    Temp = Builder.CreateLoad(Temp, "lv");
    return LValue::MakeAddr(Temp, MakeQualifiers(E->getType()));
  }

  // ?: here should be an aggregate.
  assert((hasAggregateLLVMType(E->getType()) &&
          !E->getType()->isAnyComplexType()) &&
         "Unexpected conditional operator!");

  llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
  EmitAggExpr(E, Temp, false);

  return LValue::MakeAddr(Temp, MakeQualifiers(E->getType()));
}

/// EmitCastLValue - Casts are never lvalues. If a cast is needed by the code
/// generator in an lvalue context, then it must mean that we need the address
/// of an aggregate in order to access one of its fields. This can happen for
/// all the reasons that casts are permitted with aggregate result, including
/// noop aggregate casts, and cast from scalar to union.
LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
  switch (E->getCastKind()) {
  default:
    // If this is an lvalue cast, treat it as a no-op.
    // FIXME: We shouldn't need to check for this explicitly!
    if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E))
      if (ICE->isLvalueCast())
        return EmitLValue(E->getSubExpr());

    assert(0 && "Unhandled cast!");

  case CastExpr::CK_NoOp:
  case CastExpr::CK_ConstructorConversion:
  case CastExpr::CK_UserDefinedConversion:
    return EmitLValue(E->getSubExpr());

  case CastExpr::CK_DerivedToBase: {
    const RecordType *DerivedClassTy =
      E->getSubExpr()->getType()->getAs<RecordType>();
    CXXRecordDecl *DerivedClassDecl =
      cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    const RecordType *BaseClassTy = E->getType()->getAs<RecordType>();
    CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());

    // Perform the derived-to-base conversion
    llvm::Value *Base =
      GetAddressCXXOfBaseClass(LV.getAddress(), DerivedClassDecl,
                               BaseClassDecl, /*NullCheckValue=*/false);

    return LValue::MakeAddr(Base, MakeQualifiers(E->getType()));
  }

  case CastExpr::CK_ToUnion: {
    llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
    EmitAnyExpr(E->getSubExpr(), Temp, false);

    return LValue::MakeAddr(Temp, MakeQualifiers(E->getType()));
  }
  }
}

LValue CodeGenFunction::EmitNullInitializationLValue(
                                              const CXXZeroInitValueExpr *E) {
  QualType Ty = E->getType();
  const llvm::Type *LTy = ConvertTypeForMem(Ty);
  llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
  unsigned Align = getContext().getTypeAlign(Ty)/8;
  Alloc->setAlignment(Align);
  LValue lvalue = LValue::MakeAddr(Alloc, Qualifiers());
  EmitMemSetToZero(lvalue.getAddress(), Ty);
  return lvalue;
}

//===--------------------------------------------------------------------===//
// Expression Emission
//===--------------------------------------------------------------------===//


RValue CodeGenFunction::EmitCallExpr(const CallExpr *E) {
  // Builtins never have block type.
  if (E->getCallee()->getType()->isBlockPointerType())
    return EmitBlockCallExpr(E);

  if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E))
    return EmitCXXMemberCallExpr(CE);

  const Decl *TargetDecl = 0;
  if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E->getCallee())) {
    if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) {
      TargetDecl = DRE->getDecl();
      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(TargetDecl))
        if (unsigned builtinID = FD->getBuiltinID())
          return EmitBuiltinExpr(FD, builtinID, E);
    }
  }

  if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E))
    if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
      return EmitCXXOperatorMemberCallExpr(CE, MD);

  if (isa<CXXPseudoDestructorExpr>(E->getCallee())) {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    EmitScalarExpr(E->getCallee());
    return RValue::get(0);
  }

  llvm::Value *Callee = EmitScalarExpr(E->getCallee());
  return EmitCall(Callee, E->getCallee()->getType(),
                  E->arg_begin(), E->arg_end(), TargetDecl);
}

LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
  // Comma expressions just emit their LHS then their RHS as an l-value.
  if (E->getOpcode() == BinaryOperator::Comma) {
    EmitAnyExpr(E->getLHS());
    return EmitLValue(E->getRHS());
  }

  if (E->getOpcode() == BinaryOperator::PtrMemD ||
      E->getOpcode() == BinaryOperator::PtrMemI)
    return EmitPointerToDataMemberBinaryExpr(E);

  // Can only get l-value for binary operator expressions which are a
  // simple assignment of aggregate type.
  if (E->getOpcode() != BinaryOperator::Assign)
    return EmitUnsupportedLValue(E, "binary l-value expression");

  if (!hasAggregateLLVMType(E->getType())) {
    // Emit the LHS as an l-value.
    LValue LV = EmitLValue(E->getLHS());

    llvm::Value *RHS = EmitScalarExpr(E->getRHS());
    EmitStoreOfScalar(RHS, LV.getAddress(), LV.isVolatileQualified(),
                      E->getType());
    return LV;
  }

  llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
  EmitAggExpr(E, Temp, false);
  // FIXME: Are these qualifiers correct?
  return LValue::MakeAddr(Temp, MakeQualifiers(E->getType()));
}

LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
  RValue RV = EmitCallExpr(E);

  if (RV.isScalar()) {
    assert(E->getCallReturnType()->isReferenceType() &&
           "Can't have a scalar return unless the return type is a "
           "reference type!");

    return LValue::MakeAddr(RV.getScalarVal(), MakeQualifiers(E->getType()));
  }

  return LValue::MakeAddr(RV.getAggregateAddr(), MakeQualifiers(E->getType()));
}

LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
  // FIXME: This shouldn't require another copy.
  llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
  EmitAggExpr(E, Temp, false);
  return LValue::MakeAddr(Temp, MakeQualifiers(E->getType()));
}

LValue
CodeGenFunction::EmitCXXConditionDeclLValue(const CXXConditionDeclExpr *E) {
  EmitLocalBlockVarDecl(*E->getVarDecl());
  return EmitDeclRefLValue(E);
}

LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
  llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(E->getType()), "tmp");
  EmitCXXConstructExpr(Temp, E);
  return LValue::MakeAddr(Temp, MakeQualifiers(E->getType()));
}

LValue
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
  LValue LV = EmitLValue(E->getSubExpr());

  PushCXXTemporary(E->getTemporary(), LV.getAddress());

  return LV;
}

LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
  // Can only get l-value for message expression returning aggregate type
  RValue RV = EmitObjCMessageExpr(E);
  // FIXME: can this be volatile?
  return LValue::MakeAddr(RV.getAggregateAddr(), MakeQualifiers(E->getType()));
}

llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
                                             const ObjCIvarDecl *Ivar) {
  return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
}

LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
                                          llvm::Value *BaseValue,
                                          const ObjCIvarDecl *Ivar,
                                          unsigned CVRQualifiers) {
  return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
                                                   Ivar, CVRQualifiers);
}

LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
  // FIXME: A lot of the code below could be shared with EmitMemberExpr.
  llvm::Value *BaseValue = 0;
  const Expr *BaseExpr = E->getBase();
  Qualifiers BaseQuals;
  QualType ObjectTy;
  if (E->isArrow()) {
    BaseValue = EmitScalarExpr(BaseExpr);
    ObjectTy = BaseExpr->getType()->getPointeeType();
    BaseQuals = ObjectTy.getQualifiers();
  } else {
    LValue BaseLV = EmitLValue(BaseExpr);
    // FIXME: this isn't right for bitfields.
    BaseValue = BaseLV.getAddress();
    ObjectTy = BaseExpr->getType();
    BaseQuals = ObjectTy.getQualifiers();
  }

  LValue LV =
    EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
                      BaseQuals.getCVRQualifiers());
  setObjCGCLValueClass(getContext(), E, LV);
  return LV;
}

LValue
CodeGenFunction::EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E) {
  // This is a special l-value that just issues sends when we load or store
  // through it.
  return LValue::MakePropertyRef(E, E->getType().getCVRQualifiers());
}

LValue
CodeGenFunction::EmitObjCKVCRefLValue(
                                const ObjCImplicitSetterGetterRefExpr *E) {
  // This is a special l-value that just issues sends when we load or store
  // through it.
  return LValue::MakeKVCRef(E, E->getType().getCVRQualifiers());
}

LValue
CodeGenFunction::EmitObjCSuperExprLValue(const ObjCSuperExpr *E) {
  return EmitUnsupportedLValue(E, "use of super");
}

LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {

  // Can only get l-value for message expression returning aggregate type
  RValue RV = EmitAnyExprToTemp(E);
  // FIXME: can this be volatile?
  return LValue::MakeAddr(RV.getAggregateAddr(), MakeQualifiers(E->getType()));
}


LValue CodeGenFunction::EmitPointerToDataMemberLValue(const DeclRefExpr *E) {
  const FieldDecl *Field = cast<FieldDecl>(E->getDecl());
  const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(Field->getDeclContext());
  QualType NNSpecTy =
    getContext().getCanonicalType(
      getContext().getTypeDeclType(const_cast<CXXRecordDecl*>(ClassDecl)));
  NNSpecTy = getContext().getPointerType(NNSpecTy);
  llvm::Value *V = llvm::Constant::getNullValue(ConvertType(NNSpecTy));
  LValue MemExpLV = EmitLValueForField(V, const_cast<FieldDecl*>(Field),
                                       /*isUnion*/false, /*Qualifiers*/0);
  const llvm::Type* ResultType = ConvertType(
                                        getContext().getPointerDiffType());
  V = Builder.CreatePtrToInt(MemExpLV.getAddress(), ResultType,
                             "datamember");
  LValue LV = LValue::MakeAddr(V, MakeQualifiers(E->getType()));
  return LV;
}

RValue CodeGenFunction::EmitCall(llvm::Value *Callee, QualType CalleeType,
                                 CallExpr::const_arg_iterator ArgBeg,
                                 CallExpr::const_arg_iterator ArgEnd,
                                 const Decl *TargetDecl) {
  // Get the actual function type. The callee type will always be a pointer to
  // function type or a block pointer type.
  assert(CalleeType->isFunctionPointerType() &&
         "Call must have function pointer type!");

  CalleeType = getContext().getCanonicalType(CalleeType);

  QualType FnType = cast<PointerType>(CalleeType)->getPointeeType();
  QualType ResultType = cast<FunctionType>(FnType)->getResultType();

  CallArgList Args;
  EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd);

  // FIXME: We should not need to do this, it should be part of the function
  // type.
  unsigned CallingConvention = 0;
  if (const llvm::Function *F =
        dyn_cast<llvm::Function>(Callee->stripPointerCasts()))
    CallingConvention = F->getCallingConv();
  return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args,
                                                 CallingConvention),
                  Callee, Args, TargetDecl);
}

LValue CodeGenFunction::EmitPointerToDataMemberBinaryExpr(
                                                    const BinaryOperator *E) {
  llvm::Value *BaseV = EmitLValue(E->getLHS()).getAddress();
  if (E->getOpcode() == BinaryOperator::PtrMemI)
    BaseV = Builder.CreateLoad(BaseV, "indir.ptr");
  const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(getLLVMContext());
  BaseV = Builder.CreateBitCast(BaseV, i8Ty);
  LValue RHSLV = EmitLValue(E->getRHS());
  llvm::Value *OffsetV =
    EmitLoadOfLValue(RHSLV, E->getRHS()->getType()).getScalarVal();
  const llvm::Type* ResultType = ConvertType(getContext().getPointerDiffType());
  OffsetV = Builder.CreateBitCast(OffsetV, ResultType);
  llvm::Value *AddV = Builder.CreateInBoundsGEP(BaseV, OffsetV, "add.ptr");
  QualType Ty = E->getRHS()->getType();
  const MemberPointerType *MemPtrType = Ty->getAs<MemberPointerType>();
  Ty = MemPtrType->getPointeeType();
  const llvm::Type* PType =
    ConvertType(getContext().getPointerType(Ty));
  AddV = Builder.CreateBitCast(AddV, PType);
  LValue LV = LValue::MakeAddr(AddV, MakeQualifiers(Ty));
  return LV;
}