CGExpr.cpp revision 0979c805475d1ba49b5d6ef93c4d2ce6d2eab6ed
//===--- CGExpr.cpp - Emit LLVM Code from Expressions --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGCall.h"
#include "CGObjCRuntime.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(const llvm::Type *Ty,
                                                    const char *Name) {
  if (!Builder.isNamePreserving())
    Name = "";
  return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
}

/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  QualType BoolTy = getContext().BoolTy;
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),BoolTy);
}

/// EmitAnyExpr - Emit code to compute the specified expression which can have
/// any type. The result is returned as an RValue struct. If this is an
/// aggregate expression, the aggloc/agglocvolatile arguments indicate where
/// the result should be returned.
RValue CodeGenFunction::EmitAnyExpr(const Expr *E, llvm::Value *AggLoc,
                                    bool IsAggLocVolatile, bool IgnoreResult,
                                    bool IsInitializer) {
  if (!hasAggregateLLVMType(E->getType()))
    return RValue::get(EmitScalarExpr(E, IgnoreResult));
  else if (E->getType()->isAnyComplexType())
    return RValue::getComplex(EmitComplexExpr(E, false, false,
                                              IgnoreResult, IgnoreResult));

  EmitAggExpr(E, AggLoc, IsAggLocVolatile, IgnoreResult, IsInitializer);
  return RValue::getAggregate(AggLoc, IsAggLocVolatile);
}

/// EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result
/// will always be accessible even if no aggregate location is
/// provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E,
                                          bool IsAggLocVolatile,
                                          bool IsInitializer) {
  llvm::Value *AggLoc = 0;

  if (hasAggregateLLVMType(E->getType()) &&
      !E->getType()->isAnyComplexType())
    AggLoc = CreateTempAlloca(ConvertType(E->getType()), "agg.tmp");
  return EmitAnyExpr(E, AggLoc, IsAggLocVolatile, /*IgnoreResult=*/false,
                     IsInitializer);
}

RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
                                                   QualType DestType,
                                                   bool IsInitializer) {
  RValue Val;
  if (E->isLvalue(getContext()) == Expr::LV_Valid) {
    // Emit the expr as an lvalue.
    LValue LV = EmitLValue(E);
    if (LV.isSimple())
      return RValue::get(LV.getAddress());
    Val = EmitLoadOfLValue(LV, E->getType());
  } else {
    // FIXME: Initializers don't work with casts yet. For example
    // const A& a = B();
    // if B inherits from A.
    Val = EmitAnyExprToTemp(E, /*IsAggLocVolatile=*/false,
                            IsInitializer);

    if (IsInitializer) {
      // We might have to destroy the temporary variable.
      if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
        if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
          if (!ClassDecl->hasTrivialDestructor()) {
            const CXXDestructorDecl *Dtor =
              ClassDecl->getDestructor(getContext());

            CleanupScope scope(*this);
            EmitCXXDestructorCall(Dtor, Dtor_Complete, Val.getAggregateAddr());
          }
        }
      }
    }
  }

  if (Val.isAggregate()) {
    Val = RValue::get(Val.getAggregateAddr());
  } else {
    // Create a temporary variable that we can bind the reference to.
    llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(E->getType()),
                                         "reftmp");
    if (Val.isScalar())
      EmitStoreOfScalar(Val.getScalarVal(), Temp, false, E->getType());
    else
      StoreComplexToAddr(Val.getComplexVal(), Temp, false);
    Val = RValue::get(Temp);
  }

  return Val;
}


/// getAccessedFieldNo - Given an encoded value and a result number, return
/// the input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  if (isa<llvm::ConstantAggregateZero>(Elts))
    return 0;

  return cast<llvm::ConstantInt>(Elts->getOperand(Idx))->getZExtValue();
}


//===----------------------------------------------------------------------===//
//                          LValue Expression Emission
//===----------------------------------------------------------------------===//

RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType()) {
    return RValue::get(0);
  } else if (const ComplexType *CTy = Ty->getAsComplexType()) {
    const llvm::Type *EltTy = ConvertType(CTy->getElementType());
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  } else if (hasAggregateLLVMType(Ty)) {
    const llvm::Type *LTy = llvm::PointerType::getUnqual(ConvertType(Ty));
    return RValue::getAggregate(llvm::UndefValue::get(LTy));
  } else {
    return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
  }
}

RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}

LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  const llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
  return LValue::MakeAddr(llvm::UndefValue::get(Ty),
                          E->getType().getCVRQualifiers(),
                          getContext().getObjCGCAttrKind(E->getType()),
                          E->getType().getAddressSpace());
}

/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield
/// reference. In either case, the LLVM Value* in the LValue structure is
/// guaranteed to be an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of
/// the LLVM value is known: For example, it may not be a pointer to an
/// integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed
/// size, this method guarantees that the returned pointer type will point to
/// an LLVM type of the same size of the lvalue's type. If the lvalue has a
/// variable length type, this is not possible.
///
LValue CodeGenFunction::EmitLValue(const Expr *E) {
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CallExprClass:
  case Expr::CXXOperatorCallExprClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
  case Expr::QualifiedDeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ParenExprClass:return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));

  case Expr::BlockDeclRefExprClass:
    return EmitBlockDeclRefLValue(cast<BlockDeclRefExpr>(E));

  case Expr::CXXConditionDeclExprClass:
    return EmitCXXConditionDeclLValue(cast<CXXConditionDeclExpr>(E));
  case Expr::CXXTemporaryObjectExprClass:
  case Expr::CXXConstructExprClass:
    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
  case Expr::CXXBindTemporaryExprClass:
    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));

  case Expr::ObjCMessageExprClass:
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
  case Expr::ObjCIvarRefExprClass:
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
  case Expr::ObjCPropertyRefExprClass:
    return EmitObjCPropertyRefLValue(cast<ObjCPropertyRefExpr>(E));
  case Expr::ObjCImplicitSetterGetterRefExprClass:
    return EmitObjCKVCRefLValue(cast<ObjCImplicitSetterGetterRefExpr>(E));
  case Expr::ObjCSuperExprClass:
    return EmitObjCSuperExprLValue(cast<ObjCSuperExpr>(E));

  case Expr::StmtExprClass:
    return EmitStmtExprLValue(cast<StmtExpr>(E));
  case Expr::UnaryOperatorClass:
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
  case Expr::ArraySubscriptExprClass:
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
  case Expr::ExtVectorElementExprClass:
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
  case Expr::MemberExprClass:
  case Expr::CXXQualifiedMemberExprClass:
    return EmitMemberExpr(cast<MemberExpr>(E));
  case Expr::CompoundLiteralExprClass:
    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperator(cast<ConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));
  }
}

llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
                                               QualType Ty) {
  llvm::Value *V = Builder.CreateLoad(Addr, Volatile, "tmp");

  // Bool can have different representation in memory than in registers.
  if (Ty->isBooleanType())
    if (V->getType() != llvm::Type::getInt1Ty(VMContext))
      V = Builder.CreateTrunc(V, llvm::Type::getInt1Ty(VMContext), "tobool");

  return V;
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
                                        bool Volatile, QualType Ty) {

  if (Ty->isBooleanType()) {
    // Bool can have different representation in memory than in registers.
    const llvm::Type *SrcTy = Value->getType();
    const llvm::PointerType *DstPtr = cast<llvm::PointerType>(Addr->getType());
    if (DstPtr->getElementType() != SrcTy) {
      const llvm::Type *MemTy =
        llvm::PointerType::get(SrcTy, DstPtr->getAddressSpace());
      Addr = Builder.CreateBitCast(Addr, MemTy, "storetmp");
    }
  }
  Builder.CreateStore(Value, Addr, Volatile);
}

/// EmitLoadOfLValue - Given an expression that represents a value lvalue,
/// this method emits the address of the lvalue, then loads the result as an
/// rvalue, returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, QualType ExprType) {
  if (LV.isObjCWeak()) {
    // load of a __weak object.
    llvm::Value *AddrWeakObj = LV.getAddress();
    llvm::Value *read_weak = CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                                   AddrWeakObj);
    return RValue::get(read_weak);
  }

  if (LV.isSimple()) {
    llvm::Value *Ptr = LV.getAddress();
    const llvm::Type *EltTy =
      cast<llvm::PointerType>(Ptr->getType())->getElementType();

    // Simple scalar l-value.
    if (EltTy->isSingleValueType())
      return RValue::get(EmitLoadOfScalar(Ptr, LV.isVolatileQualified(),
                                          ExprType));

    assert(ExprType->isFunctionType() && "Unknown scalar value");
    return RValue::get(Ptr);
  }

  if (LV.isVectorElt()) {
    llvm::Value *Vec = Builder.CreateLoad(LV.getVectorAddr(),
                                          LV.isVolatileQualified(), "tmp");
    return RValue::get(Builder.CreateExtractElement(Vec, LV.getVectorIdx(),
                                                    "vecext"));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
  if (LV.isExtVectorElt())
    return EmitLoadOfExtVectorElementLValue(LV, ExprType);

  if (LV.isBitfield())
    return EmitLoadOfBitfieldLValue(LV, ExprType);

  if (LV.isPropertyRef())
    return EmitLoadOfPropertyRefLValue(LV, ExprType);

  assert(LV.isKVCRef() && "Unknown LValue type!");
  return EmitLoadOfKVCRefLValue(LV, ExprType);
}

RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
                                                 QualType ExprType) {
  unsigned StartBit = LV.getBitfieldStartBit();
  unsigned BitfieldSize = LV.getBitfieldSize();
  llvm::Value *Ptr = LV.getBitfieldAddr();

  const llvm::Type *EltTy =
    cast<llvm::PointerType>(Ptr->getType())->getElementType();
  unsigned EltTySize = CGM.getTargetData().getTypeSizeInBits(EltTy);

  // In some cases the bitfield may straddle two memory locations.
  // Currently we load the entire bitfield, then do the magic to
  // sign-extend it if necessary. This results in somewhat more code
  // than necessary for the common case (one load), since two shifts
  // accomplish both the masking and sign extension.
  unsigned LowBits = std::min(BitfieldSize, EltTySize - StartBit);
  llvm::Value *Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "tmp");

  // Shift to proper location.
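  // The loaded unit holds the field starting at StartBit, so shift the field
  // down to bit zero before masking off the low part.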
  if (StartBit)
    Val = Builder.CreateLShr(Val, llvm::ConstantInt::get(EltTy, StartBit),
                             "bf.lo");

  // Mask off unused bits.
  llvm::Constant *LowMask = llvm::ConstantInt::get(VMContext,
                                llvm::APInt::getLowBitsSet(EltTySize, LowBits));
  Val = Builder.CreateAnd(Val, LowMask, "bf.lo.cleared");

  // Fetch the high bits if necessary.
  if (LowBits < BitfieldSize) {
    unsigned HighBits = BitfieldSize - LowBits;
    llvm::Value *HighPtr = Builder.CreateGEP(Ptr, llvm::ConstantInt::get(
                           llvm::Type::getInt32Ty(VMContext), 1), "bf.ptr.hi");
    llvm::Value *HighVal = Builder.CreateLoad(HighPtr,
                                              LV.isVolatileQualified(),
                                              "tmp");

    // Mask off unused bits.
    llvm::Constant *HighMask = llvm::ConstantInt::get(VMContext,
                               llvm::APInt::getLowBitsSet(EltTySize, HighBits));
    HighVal = Builder.CreateAnd(HighVal, HighMask, "bf.lo.cleared");

    // Shift to proper location and or in to bitfield value.
    HighVal = Builder.CreateShl(HighVal,
                                llvm::ConstantInt::get(EltTy, LowBits));
    Val = Builder.CreateOr(Val, HighVal, "bf.val");
  }

  // Sign extend if necessary.
  if (LV.isBitfieldSigned()) {
    llvm::Value *ExtraBits = llvm::ConstantInt::get(EltTy,
                                                    EltTySize - BitfieldSize);
    Val = Builder.CreateAShr(Builder.CreateShl(Val, ExtraBits),
                             ExtraBits, "bf.val.sext");
  }

  // The bitfield type and the normal type differ when the storage sizes
  // differ (currently just _Bool).
  Val = Builder.CreateIntCast(Val, ConvertType(ExprType), false, "tmp");

  return RValue::get(Val);
}

RValue CodeGenFunction::EmitLoadOfPropertyRefLValue(LValue LV,
                                                    QualType ExprType) {
  return EmitObjCPropertyGet(LV.getPropertyRefExpr());
}

RValue CodeGenFunction::EmitLoadOfKVCRefLValue(LValue LV,
                                               QualType ExprType) {
  return EmitObjCPropertyGet(LV.getKVCRefExpr());
}

// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
                                                         QualType ExprType) {
  llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddr(),
                                        LV.isVolatileQualified(), "tmp");

  const llvm::Constant *Elts = LV.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be
  // extracting a single element. Just codegen as an extractelement.
  const VectorType *ExprVT = ExprType->getAsVectorType();
  if (!ExprVT) {
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(
                                     llvm::Type::getInt32Ty(VMContext), InIdx);
    return RValue::get(Builder.CreateExtractElement(Vec, Elt, "tmp"));
  }

  // Always use shuffle vector to try to retain the original program structure.
  unsigned NumResultElts = ExprVT->getNumElements();

  llvm::SmallVector<llvm::Constant*, 4> Mask;
  for (unsigned i = 0; i != NumResultElts; ++i) {
    unsigned InIdx = getAccessedFieldNo(i, Elts);
    Mask.push_back(llvm::ConstantInt::get(
                                     llvm::Type::getInt32Ty(VMContext), InIdx));
  }

  llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
  Vec = Builder.CreateShuffleVector(Vec,
                                    llvm::UndefValue::get(Vec->getType()),
                                    MaskV, "tmp");
  return RValue::get(Vec);
}



/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
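/// Non-simple destinations (vector element, ext-vector component, bitfield,
/// and Objective-C property/KVC references) are dispatched to the dedicated
/// helpers below.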
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
                                             QualType Ty) {
  if (!Dst.isSimple()) {
    if (Dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element.
      llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddr(),
                                            Dst.isVolatileQualified(), "tmp");
      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
                                        Dst.getVectorIdx(), "vecins");
      Builder.CreateStore(Vec, Dst.getVectorAddr(),Dst.isVolatileQualified());
      return;
    }

    // If this is an update of extended vector elements, insert them as
    // appropriate.
    if (Dst.isExtVectorElt())
      return EmitStoreThroughExtVectorComponentLValue(Src, Dst, Ty);

    if (Dst.isBitfield())
      return EmitStoreThroughBitfieldLValue(Src, Dst, Ty);

    if (Dst.isPropertyRef())
      return EmitStoreThroughPropertyRefLValue(Src, Dst, Ty);

    if (Dst.isKVCRef())
      return EmitStoreThroughKVCRefLValue(Src, Dst, Ty);

    assert(0 && "Unknown LValue type");
  }

  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // Assignment to a __weak object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
    return;
  }

  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // Assignment to a __strong object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
#if 0
    // FIXME. We cannot positively determine if we have an 'ivar' assignment,
    // object assignment or an unknown assignment. For now, generate call to
    // objc_assign_strongCast assignment which is a safe, but conservative
    // assumption.
    if (Dst.isObjCIvar())
      CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, LvalueDst);
    else
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst);
#endif
    if (Dst.isGlobalObjCRef())
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst);
    else
      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
    return;
  }

  assert(Src.isScalar() && "Can't emit an agg store with this method");
  EmitStoreOfScalar(Src.getScalarVal(), Dst.getAddress(),
                    Dst.isVolatileQualified(), Ty);
}

void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                                     QualType Ty,
                                                     llvm::Value **Result) {
  unsigned StartBit = Dst.getBitfieldStartBit();
  unsigned BitfieldSize = Dst.getBitfieldSize();
  llvm::Value *Ptr = Dst.getBitfieldAddr();

  const llvm::Type *EltTy =
    cast<llvm::PointerType>(Ptr->getType())->getElementType();
  unsigned EltTySize = CGM.getTargetData().getTypeSizeInBits(EltTy);

  // Get the new value, cast to the appropriate type and masked to
  // exactly the size of the bit-field.
  llvm::Value *SrcVal = Src.getScalarVal();
  llvm::Value *NewVal = Builder.CreateIntCast(SrcVal, EltTy, false, "tmp");
  llvm::Constant *Mask = llvm::ConstantInt::get(VMContext,
                          llvm::APInt::getLowBitsSet(EltTySize, BitfieldSize));
  NewVal = Builder.CreateAnd(NewVal, Mask, "bf.value");

  // Return the new value of the bit-field, if requested.
  if (Result) {
    // Cast back to the proper type for result.
    const llvm::Type *SrcTy = SrcVal->getType();
    llvm::Value *SrcTrunc = Builder.CreateIntCast(NewVal, SrcTy, false,
                                                  "bf.reload.val");

    // Sign extend if necessary.
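    // The reloaded value has only BitfieldSize significant bits; the shl/ashr
    // pair below replicates the field's sign bit through the upper bits.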
    if (Dst.isBitfieldSigned()) {
      unsigned SrcTySize = CGM.getTargetData().getTypeSizeInBits(SrcTy);
      llvm::Value *ExtraBits = llvm::ConstantInt::get(SrcTy,
                                                     SrcTySize - BitfieldSize);
      SrcTrunc = Builder.CreateAShr(Builder.CreateShl(SrcTrunc, ExtraBits),
                                    ExtraBits, "bf.reload.sext");
    }

    *Result = SrcTrunc;
  }

  // In some cases the bitfield may straddle two memory locations.
  // Emit the low part first and check to see if the high needs to be
  // done.
  unsigned LowBits = std::min(BitfieldSize, EltTySize - StartBit);
  llvm::Value *LowVal = Builder.CreateLoad(Ptr, Dst.isVolatileQualified(),
                                           "bf.prev.low");

  // Compute the mask for zero-ing the low part of this bitfield.
  llvm::Constant *InvMask =
    llvm::ConstantInt::get(VMContext,
            ~llvm::APInt::getBitsSet(EltTySize, StartBit, StartBit + LowBits));

  // Compute the new low part as
  //   LowVal = (LowVal & InvMask) | (NewVal << StartBit),
  // with the shift of NewVal implicitly stripping the high bits.
  llvm::Value *NewLowVal =
    Builder.CreateShl(NewVal, llvm::ConstantInt::get(EltTy, StartBit),
                      "bf.value.lo");
  LowVal = Builder.CreateAnd(LowVal, InvMask, "bf.prev.lo.cleared");
  LowVal = Builder.CreateOr(LowVal, NewLowVal, "bf.new.lo");

  // Write back.
  Builder.CreateStore(LowVal, Ptr, Dst.isVolatileQualified());

  // If the low part doesn't cover the bitfield emit a high part.
  if (LowBits < BitfieldSize) {
    unsigned HighBits = BitfieldSize - LowBits;
    llvm::Value *HighPtr = Builder.CreateGEP(Ptr, llvm::ConstantInt::get(
                           llvm::Type::getInt32Ty(VMContext), 1), "bf.ptr.hi");
    llvm::Value *HighVal = Builder.CreateLoad(HighPtr,
                                              Dst.isVolatileQualified(),
                                              "bf.prev.hi");

    // Compute the mask for zero-ing the high part of this bitfield.
    llvm::Constant *InvMask =
      llvm::ConstantInt::get(VMContext, ~llvm::APInt::getLowBitsSet(EltTySize,
                                                                    HighBits));

    // Compute the new high part as
    //   HighVal = (HighVal & InvMask) | (NewVal lshr LowBits),
    // where the high bits of NewVal have already been cleared and the
    // shift strips the low bits.
    llvm::Value *NewHighVal =
      Builder.CreateLShr(NewVal, llvm::ConstantInt::get(EltTy, LowBits),
                         "bf.value.high");
    HighVal = Builder.CreateAnd(HighVal, InvMask, "bf.prev.hi.cleared");
    HighVal = Builder.CreateOr(HighVal, NewHighVal, "bf.new.hi");

    // Write back.
    Builder.CreateStore(HighVal, HighPtr, Dst.isVolatileQualified());
  }
}

void CodeGenFunction::EmitStoreThroughPropertyRefLValue(RValue Src,
                                                        LValue Dst,
                                                        QualType Ty) {
  EmitObjCPropertySet(Dst.getPropertyRefExpr(), Src);
}

void CodeGenFunction::EmitStoreThroughKVCRefLValue(RValue Src,
                                                   LValue Dst,
                                                   QualType Ty) {
  EmitObjCPropertySet(Dst.getKVCRefExpr(), Src);
}

void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
                                                               LValue Dst,
                                                               QualType Ty) {
  // This access turns into a read/modify/write of the vector. Load the input
  // value now.
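  // Only the accessed elements are rewritten, so the untouched lanes must be
  // preserved from the current value of the vector.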
  llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddr(),
                                        Dst.isVolatileQualified(), "tmp");
  const llvm::Constant *Elts = Dst.getExtVectorElts();

  llvm::Value *SrcVal = Src.getScalarVal();

  if (const VectorType *VTy = Ty->getAsVectorType()) {
    unsigned NumSrcElts = VTy->getNumElements();
    unsigned NumDstElts =
      cast<llvm::VectorType>(Vec->getType())->getNumElements();
    if (NumDstElts == NumSrcElts) {
      // Use shuffle vector if the src and destination are the same number of
      // elements; the mask is indexed by destination lane and selects the
      // corresponding source element.
      llvm::SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
      for (unsigned i = 0; i != NumSrcElts; ++i) {
        unsigned InIdx = getAccessedFieldNo(i, Elts);
        Mask[InIdx] = llvm::ConstantInt::get(
                                         llvm::Type::getInt32Ty(VMContext), i);
      }

      llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
      Vec = Builder.CreateShuffleVector(SrcVal,
                                        llvm::UndefValue::get(Vec->getType()),
                                        MaskV, "tmp");
    } else if (NumDstElts > NumSrcElts) {
      // Extend the source vector to the same length and then shuffle it
      // into the destination.
      // FIXME: since we're shuffling with undef, can we just use the indices
      // into that? This could be simpler.
      llvm::SmallVector<llvm::Constant*, 4> ExtMask;
      unsigned i;
      for (i = 0; i != NumSrcElts; ++i)
        ExtMask.push_back(llvm::ConstantInt::get(
                                         llvm::Type::getInt32Ty(VMContext), i));
      for (; i != NumDstElts; ++i)
        ExtMask.push_back(llvm::UndefValue::get(
                                           llvm::Type::getInt32Ty(VMContext)));
      llvm::Value *ExtMaskV = llvm::ConstantVector::get(&ExtMask[0],
                                                        ExtMask.size());
      llvm::Value *ExtSrcVal =
        Builder.CreateShuffleVector(SrcVal,
                                    llvm::UndefValue::get(SrcVal->getType()),
                                    ExtMaskV, "tmp");
      // Build the identity mask.
      llvm::SmallVector<llvm::Constant*, 4> Mask;
      for (unsigned i = 0; i != NumDstElts; ++i) {
        Mask.push_back(llvm::ConstantInt::get(
                                         llvm::Type::getInt32Ty(VMContext), i));
      }
      // Modify the entries where the new elements get shuffled in.
      for (unsigned i = 0; i != NumSrcElts; ++i) {
        unsigned Idx = getAccessedFieldNo(i, Elts);
        Mask[Idx] = llvm::ConstantInt::get(
                               llvm::Type::getInt32Ty(VMContext), i+NumDstElts);
      }
      llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
      Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV, "tmp");
    } else {
      // We should never shorten the vector.
      assert(0 && "unexpected shorten vector length");
    }
  } else {
    // If the Src is a scalar (not a vector) it must be updating one element.
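    // Insert it into the single accessed lane of the loaded vector.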
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(
                                     llvm::Type::getInt32Ty(VMContext), InIdx);
    Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt, "tmp");
  }

  Builder.CreateStore(Vec, Dst.getExtVectorAddr(), Dst.isVolatileQualified());
}

LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
  const VarDecl *VD = dyn_cast<VarDecl>(E->getDecl());

  if (VD && (VD->isBlockVarDecl() || isa<ParmVarDecl>(VD) ||
             isa<ImplicitParamDecl>(VD))) {
    LValue LV;
    bool NonGCable = VD->hasLocalStorage() &&
                     !VD->hasAttr<BlocksAttr>();
    if (VD->hasExternalStorage()) {
      llvm::Value *V = CGM.GetAddrOfGlobalVar(VD);
      if (VD->getType()->isReferenceType())
        V = Builder.CreateLoad(V, "tmp");
      LV = LValue::MakeAddr(V, E->getType().getCVRQualifiers(),
                            getContext().getObjCGCAttrKind(E->getType()),
                            E->getType().getAddressSpace());
    } else {
      llvm::Value *V = LocalDeclMap[VD];
      assert(V && "DeclRefExpr not entered in LocalDeclMap?");
      // local variables do not get their gc attribute set.
      QualType::GCAttrTypes attr = QualType::GCNone;
      // local static?
      if (!NonGCable)
        attr = getContext().getObjCGCAttrKind(E->getType());
      if (VD->hasAttr<BlocksAttr>()) {
        bool needsCopyDispose = BlockRequiresCopying(VD->getType());
        const llvm::Type *PtrStructTy = V->getType();
        const llvm::Type *Ty = PtrStructTy;
        Ty = llvm::PointerType::get(Ty, 0);
        V = Builder.CreateStructGEP(V, 1, "forwarding");
        V = Builder.CreateBitCast(V, Ty);
        V = Builder.CreateLoad(V, false);
        V = Builder.CreateBitCast(V, PtrStructTy);
        V = Builder.CreateStructGEP(V, needsCopyDispose*2 + 4, "x");
      }
      if (VD->getType()->isReferenceType())
        V = Builder.CreateLoad(V, "tmp");
      LV = LValue::MakeAddr(V, E->getType().getCVRQualifiers(), attr,
                            E->getType().getAddressSpace());
    }
    LValue::SetObjCNonGC(LV, NonGCable);
    return LV;
  } else if (VD && VD->isFileVarDecl()) {
    llvm::Value *V = CGM.GetAddrOfGlobalVar(VD);
    if (VD->getType()->isReferenceType())
      V = Builder.CreateLoad(V, "tmp");
    LValue LV = LValue::MakeAddr(V, E->getType().getCVRQualifiers(),
                                 getContext().getObjCGCAttrKind(E->getType()),
                                 E->getType().getAddressSpace());
    if (LV.isObjCStrong())
      LV.SetGlobalObjCRef(LV, true);
    return LV;
  } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(E->getDecl())) {
    llvm::Value* V = CGM.GetAddrOfFunction(GlobalDecl(FD));
    if (!FD->hasPrototype()) {
      if (const FunctionProtoType *Proto =
              FD->getType()->getAsFunctionProtoType()) {
        // Ugly case: for a K&R-style definition, the type of the definition
        // isn't the same as the type of a use. Correct for this with a
        // bitcast.
        QualType NoProtoType =
          getContext().getFunctionNoProtoType(Proto->getResultType());
        NoProtoType = getContext().getPointerType(NoProtoType);
        V = Builder.CreateBitCast(V, ConvertType(NoProtoType), "tmp");
      }
    }
    return LValue::MakeAddr(V, E->getType().getCVRQualifiers(),
                            getContext().getObjCGCAttrKind(E->getType()),
                            E->getType().getAddressSpace());
  } else if (const ImplicitParamDecl *IPD =
               dyn_cast<ImplicitParamDecl>(E->getDecl())) {
    llvm::Value *V = LocalDeclMap[IPD];
    assert(V && "BlockVarDecl not entered in LocalDeclMap?");
    return LValue::MakeAddr(V, E->getType().getCVRQualifiers(),
                            getContext().getObjCGCAttrKind(E->getType()),
                            E->getType().getAddressSpace());
  }
  assert(0 && "Unimp declref");
  // An invalid LValue, but the assert will
  // ensure that this point is never reached.
  return LValue();
}

LValue CodeGenFunction::EmitBlockDeclRefLValue(const BlockDeclRefExpr *E) {
  return LValue::MakeAddr(GetAddrOfBlockDecl(E),
                          E->getType().getCVRQualifiers(),
                          getContext().getObjCGCAttrKind(E->getType()),
                          E->getType().getAddressSpace());
}

LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
  // __extension__ doesn't affect lvalue-ness.
  if (E->getOpcode() == UnaryOperator::Extension)
    return EmitLValue(E->getSubExpr());

  QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
  switch (E->getOpcode()) {
  default: assert(0 && "Unknown unary operator lvalue!");
  case UnaryOperator::Deref:
  {
    QualType T = E->getSubExpr()->getType()->getPointeeType();
    assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");

    LValue LV = LValue::MakeAddr(EmitScalarExpr(E->getSubExpr()),
                                 T.getCVRQualifiers(),
                                 getContext().getObjCGCAttrKind(T),
                                 ExprTy.getAddressSpace());
    // We should not generate __weak write barrier on indirect reference
    // of a pointer to object; as in void foo (__weak id *param); *param = 0;
    // But, we continue to generate __strong write barrier on indirect write
    // into a pointer to object.
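    // Accordingly, the l-value is only marked non-GC when the dereference is
    // not itself a GC write-barrier candidate.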
    if (getContext().getLangOptions().ObjC1 &&
        getContext().getLangOptions().getGCMode() != LangOptions::NonGC &&
        LV.isObjCWeak())
      LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate(getContext()));
    return LV;
  }
  case UnaryOperator::Real:
  case UnaryOperator::Imag:
    LValue LV = EmitLValue(E->getSubExpr());
    unsigned Idx = E->getOpcode() == UnaryOperator::Imag;
    return LValue::MakeAddr(Builder.CreateStructGEP(LV.getAddress(),
                                                    Idx, "idx"),
                            ExprTy.getCVRQualifiers(),
                            QualType::GCNone,
                            ExprTy.getAddressSpace());
  }
}

LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
  return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromLiteral(E), 0);
}

LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
  return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromObjCEncode(E), 0);
}


LValue CodeGenFunction::EmitPredefinedFunctionName(unsigned Type) {
  std::string GlobalVarName;

  switch (Type) {
  default:
    assert(0 && "Invalid type");
  case PredefinedExpr::Func:
    GlobalVarName = "__func__.";
    break;
  case PredefinedExpr::Function:
    GlobalVarName = "__FUNCTION__.";
    break;
  case PredefinedExpr::PrettyFunction:
    // FIXME: Demangle C++ method names
    GlobalVarName = "__PRETTY_FUNCTION__.";
    break;
  }

  // FIXME: This isn't right at all. The logic for computing this should go
  // into a method on PredefinedExpr. This would allow sema and codegen to be
  // consistent for things like sizeof(__func__) etc.
  std::string FunctionName;
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
    FunctionName = CGM.getMangledName(FD);
  } else {
    // Just get the mangled name; skipping the asm prefix if it
    // exists.
    FunctionName = CurFn->getName();
    if (FunctionName[0] == '\01')
      FunctionName = FunctionName.substr(1, std::string::npos);
  }

  GlobalVarName += FunctionName;
  llvm::Constant *C =
    CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str());
  return LValue::MakeAddr(C, 0);
}

LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
  switch (E->getIdentType()) {
  default:
    return EmitUnsupportedLValue(E, "predefined expression");
  case PredefinedExpr::Func:
  case PredefinedExpr::Function:
  case PredefinedExpr::PrettyFunction:
    return EmitPredefinedFunctionName(E->getIdentType());
  }
}

LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
  // The index must always be an integer, which is not an aggregate. Emit it.
  llvm::Value *Idx = EmitScalarExpr(E->getIdx());
  QualType IdxTy = E->getIdx()->getType();
  bool IdxSigned = IdxTy->isSignedIntegerType();

  // If the base is a vector type, then we are forming a vector element lvalue
  // with this subscript.
  if (E->getBase()->getType()->isVectorType()) {
    // Emit the vector as an lvalue to get its address.
    LValue LHS = EmitLValue(E->getBase());
    assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
    Idx = Builder.CreateIntCast(Idx,
                         llvm::Type::getInt32Ty(VMContext), IdxSigned, "vidx");
    return LValue::MakeVectorElt(LHS.getAddress(), Idx,
                                 E->getBase()->getType().getCVRQualifiers());
  }

  // The base must be a pointer, which is not an aggregate. Emit it.
  llvm::Value *Base = EmitScalarExpr(E->getBase());

  // Extend or truncate the index type to 32 or 64-bits.
  unsigned IdxBitwidth = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
  if (IdxBitwidth != LLVMPointerWidth)
    Idx = Builder.CreateIntCast(Idx,
                           llvm::IntegerType::get(VMContext, LLVMPointerWidth),
                                IdxSigned, "idxprom");

  // We know that the pointer points to a type of the correct size,
  // unless the size is a VLA or Objective-C interface.
  llvm::Value *Address = 0;
  if (const VariableArrayType *VAT =
        getContext().getAsVariableArrayType(E->getType())) {
    llvm::Value *VLASize = GetVLASize(VAT);

    Idx = Builder.CreateMul(Idx, VLASize);

    QualType BaseType = getContext().getBaseElementType(VAT);

    uint64_t BaseTypeSize = getContext().getTypeSize(BaseType) / 8;
    Idx = Builder.CreateUDiv(Idx,
                             llvm::ConstantInt::get(Idx->getType(),
                                                    BaseTypeSize));
    Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
  } else if (const ObjCInterfaceType *OIT =
               dyn_cast<ObjCInterfaceType>(E->getType())) {
    llvm::Value *InterfaceSize =
      llvm::ConstantInt::get(Idx->getType(),
                             getContext().getTypeSize(OIT) / 8);

    Idx = Builder.CreateMul(Idx, InterfaceSize);

    const llvm::Type *i8PTy =
      llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext));
    Address = Builder.CreateGEP(Builder.CreateBitCast(Base, i8PTy),
                                Idx, "arrayidx");
    Address = Builder.CreateBitCast(Address, Base->getType());
  } else {
    Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
  }

  QualType T = E->getBase()->getType()->getPointeeType();
  assert(!T.isNull() &&
         "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");

  LValue LV = LValue::MakeAddr(Address,
                               T.getCVRQualifiers(),
                               getContext().getObjCGCAttrKind(T),
                               E->getBase()->getType().getAddressSpace());
  if (getContext().getLangOptions().ObjC1 &&
      getContext().getLangOptions().getGCMode() != LangOptions::NonGC)
    LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate(getContext()));
  return LV;
}

static
llvm::Constant *GenerateConstantVector(llvm::LLVMContext &VMContext,
                                       llvm::SmallVector<unsigned, 4> &Elts) {
  llvm::SmallVector<llvm::Constant *, 4> CElts;

  for (unsigned i = 0, e = Elts.size(); i != e; ++i)
    CElts.push_back(llvm::ConstantInt::get(
                                   llvm::Type::getInt32Ty(VMContext), Elts[i]));

  return llvm::ConstantVector::get(&CElts[0], CElts.size());
}

LValue CodeGenFunction::
EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
  // Emit the base vector as an l-value.
  LValue Base;

  // ExtVectorElementExpr's base can either be a vector or pointer to vector.
  if (!E->isArrow()) {
    assert(E->getBase()->getType()->isVectorType());
    Base = EmitLValue(E->getBase());
  } else {
    const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
    llvm::Value *Ptr = EmitScalarExpr(E->getBase());
    Base = LValue::MakeAddr(Ptr, PT->getPointeeType().getCVRQualifiers(),
                            QualType::GCNone,
                            PT->getPointeeType().getAddressSpace());
  }

  // Encode the element access list into a vector of unsigned indices.
  llvm::SmallVector<unsigned, 4> Indices;
  E->getEncodedElementAccess(Indices);

  if (Base.isSimple()) {
    llvm::Constant *CV = GenerateConstantVector(VMContext, Indices);
    return LValue::MakeExtVectorElt(Base.getAddress(), CV,
                                    Base.getQualifiers());
  }
  assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");

  llvm::Constant *BaseElts = Base.getExtVectorElts();
  llvm::SmallVector<llvm::Constant *, 4> CElts;

  for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
    if (isa<llvm::ConstantAggregateZero>(BaseElts))
      CElts.push_back(llvm::ConstantInt::get(
                                         llvm::Type::getInt32Ty(VMContext), 0));
    else
      CElts.push_back(BaseElts->getOperand(Indices[i]));
  }
  llvm::Constant *CV = llvm::ConstantVector::get(&CElts[0], CElts.size());
  return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV,
                                  Base.getQualifiers());
}

LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
  bool isUnion = false;
  bool isIvar = false;
  bool isNonGC = false;
  Expr *BaseExpr = E->getBase();
  llvm::Value *BaseValue = NULL;
  unsigned CVRQualifiers = 0;

  // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
  if (E->isArrow()) {
    BaseValue = EmitScalarExpr(BaseExpr);
    const PointerType *PTy =
      BaseExpr->getType()->getAs<PointerType>();
    if (PTy->getPointeeType()->isUnionType())
      isUnion = true;
    CVRQualifiers = PTy->getPointeeType().getCVRQualifiers();
  } else if (isa<ObjCPropertyRefExpr>(BaseExpr) ||
             isa<ObjCImplicitSetterGetterRefExpr>(BaseExpr)) {
    RValue RV = EmitObjCPropertyGet(BaseExpr);
    BaseValue = RV.getAggregateAddr();
    if (BaseExpr->getType()->isUnionType())
      isUnion = true;
    CVRQualifiers = BaseExpr->getType().getCVRQualifiers();
  } else {
    LValue BaseLV = EmitLValue(BaseExpr);
    if (BaseLV.isObjCIvar())
      isIvar = true;
    if (BaseLV.isNonGC())
      isNonGC = true;
    // FIXME: this isn't right for bitfields.
    BaseValue = BaseLV.getAddress();
    QualType BaseTy = BaseExpr->getType();
    if (BaseTy->isUnionType())
      isUnion = true;
    CVRQualifiers = BaseTy.getCVRQualifiers();
  }

  FieldDecl *Field = dyn_cast<FieldDecl>(E->getMemberDecl());
  // FIXME: Handle non-field member expressions
  assert(Field && "No code generation for non-field member references");
  LValue MemExpLV = EmitLValueForField(BaseValue, Field, isUnion,
                                       CVRQualifiers);
  LValue::SetObjCIvar(MemExpLV, isIvar);
  LValue::SetObjCNonGC(MemExpLV, isNonGC);
  return MemExpLV;
}

LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value* BaseValue,
                                              FieldDecl* Field,
                                              unsigned CVRQualifiers) {
  CodeGenTypes::BitFieldInfo Info = CGM.getTypes().getBitFieldInfo(Field);

  // FIXME: CodeGenTypes should expose a method to get the appropriate type for
  // FieldTy (the appropriate type is ABI-dependent).
  const llvm::Type *FieldTy =
    CGM.getTypes().ConvertTypeForMem(Field->getType());
  const llvm::PointerType *BaseTy =
    cast<llvm::PointerType>(BaseValue->getType());
  unsigned AS = BaseTy->getAddressSpace();
  BaseValue = Builder.CreateBitCast(BaseValue,
                                    llvm::PointerType::get(FieldTy, AS),
                                    "tmp");

  llvm::Value *Idx =
    llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Info.FieldNo);
  llvm::Value *V = Builder.CreateGEP(BaseValue, Idx, "tmp");

  return LValue::MakeBitfield(V, Info.Start, Info.Size,
                              Field->getType()->isSignedIntegerType(),
                            Field->getType().getCVRQualifiers()|CVRQualifiers);
}

LValue CodeGenFunction::EmitLValueForField(llvm::Value* BaseValue,
                                           FieldDecl* Field,
                                           bool isUnion,
                                           unsigned CVRQualifiers)
{
  if (Field->isBitField())
    return EmitLValueForBitfield(BaseValue, Field, CVRQualifiers);

  unsigned idx = CGM.getTypes().getLLVMFieldNo(Field);
  llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx, "tmp");

  // Match union field type.
  if (isUnion) {
    const llvm::Type *FieldTy =
      CGM.getTypes().ConvertTypeForMem(Field->getType());
    const llvm::PointerType *BaseTy =
      cast<llvm::PointerType>(BaseValue->getType());
    unsigned AS = BaseTy->getAddressSpace();
    V = Builder.CreateBitCast(V,
                              llvm::PointerType::get(FieldTy, AS),
                              "tmp");
  }
  if (Field->getType()->isReferenceType())
    V = Builder.CreateLoad(V, "tmp");

  QualType::GCAttrTypes attr = QualType::GCNone;
  if (CGM.getLangOptions().ObjC1 &&
      CGM.getLangOptions().getGCMode() != LangOptions::NonGC) {
    QualType Ty = Field->getType();
    attr = Ty.getObjCGCAttr();
    if (attr != QualType::GCNone) {
      // __weak attribute on a field is ignored.
      if (attr == QualType::Weak)
        attr = QualType::GCNone;
    } else if (Ty->isObjCObjectPointerType())
      attr = QualType::Strong;
  }
  LValue LV =
    LValue::MakeAddr(V,
                     Field->getType().getCVRQualifiers()|CVRQualifiers,
                     attr,
                     Field->getType().getAddressSpace());
  return LV;
}

LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr* E){
  const llvm::Type *LTy = ConvertType(E->getType());
  llvm::Value *DeclPtr = CreateTempAlloca(LTy, ".compoundliteral");

  const Expr* InitExpr = E->getInitializer();
  LValue Result = LValue::MakeAddr(DeclPtr, E->getType().getCVRQualifiers(),
                                   QualType::GCNone,
                                   E->getType().getAddressSpace());

  if (E->getType()->isComplexType()) {
    EmitComplexExprIntoAddr(InitExpr, DeclPtr, false);
  } else if (hasAggregateLLVMType(E->getType())) {
    EmitAnyExpr(InitExpr, DeclPtr, false);
  } else {
    EmitStoreThroughLValue(EmitAnyExpr(InitExpr), Result, E->getType());
  }

  return Result;
}

LValue CodeGenFunction::EmitConditionalOperator(const ConditionalOperator* E) {
  if (E->isLvalue(getContext()) == Expr::LV_Valid)
    return EmitUnsupportedLValue(E, "conditional operator");

  // ?: here should be an aggregate.
  assert((hasAggregateLLVMType(E->getType()) &&
          !E->getType()->isAnyComplexType()) &&
         "Unexpected conditional operator!");

  llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
  EmitAggExpr(E, Temp, false);

  return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers(),
                          getContext().getObjCGCAttrKind(E->getType()),
                          E->getType().getAddressSpace());

}

/// EmitCastLValue - Casts are never lvalues. If a cast is needed by the code
/// generator in an lvalue context, then it must mean that we need the address
/// of an aggregate in order to access one of its fields. This can happen for
/// all the reasons that casts are permitted with aggregate result, including
/// noop aggregate casts, and cast from scalar to union.
LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
  if (E->getCastKind() == CastExpr::CK_UserDefinedConversion) {
    if (const CXXFunctionalCastExpr *CXXFExpr =
          dyn_cast<CXXFunctionalCastExpr>(E))
      return LValue::MakeAddr(
                         EmitCXXFunctionalCastExpr(CXXFExpr).getScalarVal(), 0);
    assert(isa<CStyleCastExpr>(E) &&
           "EmitCastLValue - Expected CStyleCastExpr");
    return EmitLValue(E->getSubExpr());
  }

  // If this is an aggregate-to-aggregate cast, just use the input's address as
  // the lvalue.
  if (E->getCastKind() == CastExpr::CK_NoOp)
    return EmitLValue(E->getSubExpr());

  // If this is an lvalue cast, treat it as a no-op.
  // FIXME: We shouldn't need to check for this explicitly!
  if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E))
    if (ICE->isLvalueCast())
      return EmitLValue(E->getSubExpr());

  // Otherwise, we must have a cast from scalar to union.
  assert(E->getCastKind() == CastExpr::CK_ToUnion &&
         "Expected scalar-to-union cast");

  // Casts are only lvalues when the source and destination types are the same.
  llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
  EmitAnyExpr(E->getSubExpr(), Temp, false);

  return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers(),
                          getContext().getObjCGCAttrKind(E->getType()),
                          E->getType().getAddressSpace());
}

//===--------------------------------------------------------------------===//
//                             Expression Emission
//===--------------------------------------------------------------------===//


RValue CodeGenFunction::EmitCallExpr(const CallExpr *E) {
  // Builtins never have block type.
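  // A callee of block-pointer type can therefore be handed to the block call
  // path before checking for a builtin.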
  if (E->getCallee()->getType()->isBlockPointerType())
    return EmitBlockCallExpr(E);

  if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E))
    return EmitCXXMemberCallExpr(CE);

  const Decl *TargetDecl = 0;
  if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E->getCallee())) {
    if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) {
      TargetDecl = DRE->getDecl();
      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(TargetDecl))
        if (unsigned builtinID = FD->getBuiltinID(getContext()))
          return EmitBuiltinExpr(FD, builtinID, E);
    }
  }

  if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E))
    if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
      return EmitCXXOperatorMemberCallExpr(CE, MD);

  llvm::Value *Callee = EmitScalarExpr(E->getCallee());
  return EmitCall(Callee, E->getCallee()->getType(),
                  E->arg_begin(), E->arg_end(), TargetDecl);
}

LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
  // Comma expressions just emit their LHS then their RHS as an l-value.
  if (E->getOpcode() == BinaryOperator::Comma) {
    EmitAnyExpr(E->getLHS());
    return EmitLValue(E->getRHS());
  }

  // Can only get l-value for binary operator expressions which are a
  // simple assignment of aggregate type.
  if (E->getOpcode() != BinaryOperator::Assign)
    return EmitUnsupportedLValue(E, "binary l-value expression");

  llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
  EmitAggExpr(E, Temp, false);
  // FIXME: Are these qualifiers correct?
  return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers(),
                          getContext().getObjCGCAttrKind(E->getType()),
                          E->getType().getAddressSpace());
}

LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
  RValue RV = EmitCallExpr(E);

  if (RV.isScalar()) {
    assert(E->getCallReturnType()->isReferenceType() &&
           "Can't have a scalar return unless the return type is a "
           "reference type!");

    return LValue::MakeAddr(RV.getScalarVal(), E->getType().getCVRQualifiers(),
                            getContext().getObjCGCAttrKind(E->getType()),
                            E->getType().getAddressSpace());
  }

  return LValue::MakeAddr(RV.getAggregateAddr(),
                          E->getType().getCVRQualifiers(),
                          getContext().getObjCGCAttrKind(E->getType()),
                          E->getType().getAddressSpace());
}

LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
  // FIXME: This shouldn't require another copy.
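  // For now, materialize the result into a temporary and return its address.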
  llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
  EmitAggExpr(E, Temp, false);
  return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers(),
                          QualType::GCNone, E->getType().getAddressSpace());
}

LValue
CodeGenFunction::EmitCXXConditionDeclLValue(const CXXConditionDeclExpr *E) {
  EmitLocalBlockVarDecl(*E->getVarDecl());
  return EmitDeclRefLValue(E);
}

LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
  llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(E->getType()), "tmp");
  EmitCXXConstructExpr(Temp, E);
  return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers(),
                          QualType::GCNone, E->getType().getAddressSpace());
}

LValue
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
  LValue LV = EmitLValue(E->getSubExpr());

  PushCXXTemporary(E->getTemporary(), LV.getAddress());

  return LV;
}

LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
  // Can only get l-value for message expression returning aggregate type
  RValue RV = EmitObjCMessageExpr(E);
  // FIXME: can this be volatile?
  return LValue::MakeAddr(RV.getAggregateAddr(),
                          E->getType().getCVRQualifiers(),
                          getContext().getObjCGCAttrKind(E->getType()),
                          E->getType().getAddressSpace());
}

llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
                                             const ObjCIvarDecl *Ivar) {
  return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
}

LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
                                          llvm::Value *BaseValue,
                                          const ObjCIvarDecl *Ivar,
                                          unsigned CVRQualifiers) {
  return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
                                                   Ivar, CVRQualifiers);
}

LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
  // FIXME: A lot of the code below could be shared with EmitMemberExpr.
  llvm::Value *BaseValue = 0;
  const Expr *BaseExpr = E->getBase();
  unsigned CVRQualifiers = 0;
  QualType ObjectTy;
  if (E->isArrow()) {
    BaseValue = EmitScalarExpr(BaseExpr);
    ObjectTy = BaseExpr->getType()->getPointeeType();
    CVRQualifiers = ObjectTy.getCVRQualifiers();
  } else {
    LValue BaseLV = EmitLValue(BaseExpr);
    // FIXME: this isn't right for bitfields.
    BaseValue = BaseLV.getAddress();
    ObjectTy = BaseExpr->getType();
    CVRQualifiers = ObjectTy.getCVRQualifiers();
  }

  return EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(), CVRQualifiers);
}

LValue
CodeGenFunction::EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E) {
  // This is a special l-value that just issues sends when we load or
  // store through it.
  return LValue::MakePropertyRef(E, E->getType().getCVRQualifiers());
}

LValue
CodeGenFunction::EmitObjCKVCRefLValue(
                                const ObjCImplicitSetterGetterRefExpr *E) {
  // This is a special l-value that just issues sends when we load or
  // store through it.
  return LValue::MakeKVCRef(E, E->getType().getCVRQualifiers());
}

LValue
CodeGenFunction::EmitObjCSuperExprLValue(const ObjCSuperExpr *E) {
  return EmitUnsupportedLValue(E, "use of super");
}

LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {

  // Can only get l-value for statement expression returning aggregate type.
  RValue RV = EmitAnyExprToTemp(E);
  // FIXME: can this be volatile?
  return LValue::MakeAddr(RV.getAggregateAddr(),
                          E->getType().getCVRQualifiers(),
                          getContext().getObjCGCAttrKind(E->getType()),
                          E->getType().getAddressSpace());
}


RValue CodeGenFunction::EmitCall(llvm::Value *Callee, QualType CalleeType,
                                 CallExpr::const_arg_iterator ArgBeg,
                                 CallExpr::const_arg_iterator ArgEnd,
                                 const Decl *TargetDecl) {
  // Get the actual function type. The callee type will always be a
  // pointer to function type (block calls are handled separately in
  // EmitCallExpr).
  assert(CalleeType->isFunctionPointerType() &&
         "Call must have function pointer type!");

  QualType FnType = CalleeType->getAs<PointerType>()->getPointeeType();
  QualType ResultType = FnType->getAsFunctionType()->getResultType();

  CallArgList Args;
  EmitCallArgs(Args, FnType->getAsFunctionProtoType(), ArgBeg, ArgEnd);

  return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args),
                  Callee, Args, TargetDecl);
}