CGExpr.cpp revision aa771a838a8f39633947ccc597e11d57e2839089
//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGCall.h"
#include "CGObjCRuntime.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(const llvm::Type *Ty,
                                                    const char *Name) {
  if (!Builder.isNamePreserving())
    Name = "";
  return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
}

/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  QualType BoolTy = getContext().BoolTy;
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy);
}

/// EmitAnyExpr - Emit code to compute the specified expression which can have
/// any type. The result is returned as an RValue struct. If this is an
/// aggregate expression, the aggloc/agglocvolatile arguments indicate where
/// the result should be returned.
RValue CodeGenFunction::EmitAnyExpr(const Expr *E, llvm::Value *AggLoc,
                                    bool isAggLocVolatile) {
  if (!hasAggregateLLVMType(E->getType()))
    return RValue::get(EmitScalarExpr(E));
  else if (E->getType()->isAnyComplexType())
    return RValue::getComplex(EmitComplexExpr(E));

  EmitAggExpr(E, AggLoc, isAggLocVolatile);
  return RValue::getAggregate(AggLoc);
}

/// EmitAnyExprToTemp - Similar to EmitAnyExpr(); however, the result
/// will always be accessible even if no aggregate location is
/// provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E, llvm::Value *AggLoc,
                                          bool isAggLocVolatile) {
  if (!AggLoc && hasAggregateLLVMType(E->getType()) &&
      !E->getType()->isAnyComplexType())
    AggLoc = CreateTempAlloca(ConvertType(E->getType()), "agg.tmp");
  return EmitAnyExpr(E, AggLoc, isAggLocVolatile);
}

/// getAccessedFieldNo - Given an encoded value and a result number, return
/// the input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  if (isa<llvm::ConstantAggregateZero>(Elts))
    return 0;

  return cast<llvm::ConstantInt>(Elts->getOperand(Idx))->getZExtValue();
}


//===----------------------------------------------------------------------===//
//                         LValue Expression Emission
//===----------------------------------------------------------------------===//

RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType()) {
    return RValue::get(0);
  } else if (const ComplexType *CTy = Ty->getAsComplexType()) {
    const llvm::Type *EltTy = ConvertType(CTy->getElementType());
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  } else if (hasAggregateLLVMType(Ty)) {
    const llvm::Type *LTy = llvm::PointerType::getUnqual(ConvertType(Ty));
    return RValue::getAggregate(llvm::UndefValue::get(LTy));
  } else {
    return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
  }
}

RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}

LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
  return LValue::MakeAddr(llvm::UndefValue::get(Ty),
                          E->getType().getCVRQualifiers(),
                          getContext().getObjCGCAttrKind(E->getType()));
}

/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield
/// reference.  In either case, the LLVM Value* in the LValue structure is
/// guaranteed to be an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of
/// the LLVM value is known: For example, it may not be a pointer to an
/// integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed
/// size, this method guarantees that the returned pointer type will point to
/// an LLVM type of the same size as the lvalue's type.  If the lvalue has a
/// variable length type, this is not possible.
///
LValue CodeGenFunction::EmitLValue(const Expr *E) {
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CallExprClass:
  case Expr::CXXOperatorCallExprClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
  case Expr::QualifiedDeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ParenExprClass:
    return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));

  case Expr::BlockDeclRefExprClass:
    return EmitBlockDeclRefLValue(cast<BlockDeclRefExpr>(E));

  case Expr::CXXConditionDeclExprClass:
    return EmitCXXConditionDeclLValue(cast<CXXConditionDeclExpr>(E));

  case Expr::ObjCMessageExprClass:
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
  case Expr::ObjCIvarRefExprClass:
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
  case Expr::ObjCPropertyRefExprClass:
    return EmitObjCPropertyRefLValue(cast<ObjCPropertyRefExpr>(E));
  case Expr::ObjCKVCRefExprClass:
    return EmitObjCKVCRefLValue(cast<ObjCKVCRefExpr>(E));
  case Expr::ObjCSuperExprClass:
    return EmitObjCSuperExpr(cast<ObjCSuperExpr>(E));

  case Expr::UnaryOperatorClass:
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
  case Expr::ArraySubscriptExprClass:
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
  case Expr::ExtVectorElementExprClass:
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
  case Expr::MemberExprClass: return EmitMemberExpr(cast<MemberExpr>(E));
  case Expr::CompoundLiteralExprClass:
    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperator(cast<ConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));
  }
}

llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
                                               QualType Ty) {
  llvm::Value *V = Builder.CreateLoad(Addr, Volatile, "tmp");

  // Bool can have different representation in memory than in
  // registers.
  if (Ty->isBooleanType())
    if (V->getType() != llvm::Type::Int1Ty)
      V = Builder.CreateTrunc(V, llvm::Type::Int1Ty, "tobool");

  return V;
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
                                        bool Volatile) {
  // Handle stores of types which have different representations in
  // memory and as LLVM values.

  // FIXME: We shouldn't be this loose, we should only do this
  // conversion when we have a type we know has a different memory
  // representation (e.g., bool).
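  // Illustrative note (not part of the original comments): bool is the case
  // the FIXME above has in mind -- EmitLoadOfScalar earlier truncates such a
  // load down to an i1 value, and the code below compensates on the store
  // side by bitcasting the destination pointer whenever the value type no
  // longer matches the pointee type.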
  const llvm::Type *SrcTy = Value->getType();
  const llvm::PointerType *DstPtr = cast<llvm::PointerType>(Addr->getType());
  if (DstPtr->getElementType() != SrcTy) {
    const llvm::Type *MemTy =
      llvm::PointerType::get(SrcTy, DstPtr->getAddressSpace());
    Addr = Builder.CreateBitCast(Addr, MemTy, "storetmp");
  }

  Builder.CreateStore(Value, Addr, Volatile);
}

/// EmitLoadOfLValue - Given an expression that represents a value lvalue,
/// this method emits the address of the lvalue, then loads the result as an
/// rvalue, returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, QualType ExprType) {
  if (LV.isObjCWeak()) {
    // load of a __weak object.
    llvm::Value *AddrWeakObj = LV.getAddress();
    llvm::Value *read_weak = CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                                   AddrWeakObj);
    return RValue::get(read_weak);
  }

  if (LV.isSimple()) {
    llvm::Value *Ptr = LV.getAddress();
    const llvm::Type *EltTy =
      cast<llvm::PointerType>(Ptr->getType())->getElementType();

    // Simple scalar l-value.
    if (EltTy->isSingleValueType())
      return RValue::get(EmitLoadOfScalar(Ptr, LV.isVolatileQualified(),
                                          ExprType));

    assert(ExprType->isFunctionType() && "Unknown scalar value");
    return RValue::get(Ptr);
  }

  if (LV.isVectorElt()) {
    llvm::Value *Vec = Builder.CreateLoad(LV.getVectorAddr(),
                                          LV.isVolatileQualified(), "tmp");
    return RValue::get(Builder.CreateExtractElement(Vec, LV.getVectorIdx(),
                                                    "vecext"));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
  if (LV.isExtVectorElt())
    return EmitLoadOfExtVectorElementLValue(LV, ExprType);

  if (LV.isBitfield())
    return EmitLoadOfBitfieldLValue(LV, ExprType);

  if (LV.isPropertyRef())
    return EmitLoadOfPropertyRefLValue(LV, ExprType);

  assert(LV.isKVCRef() && "Unknown LValue type!");
  return EmitLoadOfKVCRefLValue(LV, ExprType);
}

RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
                                                 QualType ExprType) {
  unsigned StartBit = LV.getBitfieldStartBit();
  unsigned BitfieldSize = LV.getBitfieldSize();
  llvm::Value *Ptr = LV.getBitfieldAddr();

  const llvm::Type *EltTy =
    cast<llvm::PointerType>(Ptr->getType())->getElementType();
  unsigned EltTySize = CGM.getTargetData().getTypeSizeInBits(EltTy);

  // In some cases the bitfield may straddle two memory locations.
  // Currently we load the entire bitfield, then do the magic to
  // sign-extend it if necessary. This results in somewhat more code
  // than necessary for the common case (one load), since two shifts
  // accomplish both the masking and sign extension.
  unsigned LowBits = std::min(BitfieldSize, EltTySize - StartBit);
  llvm::Value *Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "tmp");

  // Shift to proper location.
  if (StartBit)
    Val = Builder.CreateLShr(Val, llvm::ConstantInt::get(EltTy, StartBit),
                             "bf.lo");

  // Mask off unused bits.
  llvm::Constant *LowMask =
    llvm::ConstantInt::get(llvm::APInt::getLowBitsSet(EltTySize, LowBits));
  Val = Builder.CreateAnd(Val, LowMask, "bf.lo.cleared");

  // Fetch the high bits if necessary.
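  // Worked example (illustrative): for a 10-bit field starting at bit 28 of a
  // 32-bit access unit, LowBits = min(10, 32 - 28) = 4, so the remaining
  // HighBits = 6 are loaded from the adjacent unit below and OR'd in above
  // bit 4.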
  if (LowBits < BitfieldSize) {
    unsigned HighBits = BitfieldSize - LowBits;
    llvm::Value *HighPtr =
      Builder.CreateGEP(Ptr, llvm::ConstantInt::get(llvm::Type::Int32Ty, 1),
                        "bf.ptr.hi");
    llvm::Value *HighVal = Builder.CreateLoad(HighPtr,
                                              LV.isVolatileQualified(),
                                              "tmp");

    // Mask off unused bits.
    llvm::Constant *HighMask =
      llvm::ConstantInt::get(llvm::APInt::getLowBitsSet(EltTySize, HighBits));
    HighVal = Builder.CreateAnd(HighVal, HighMask, "bf.lo.cleared");

    // Shift to proper location and OR in to the bitfield value.
    HighVal = Builder.CreateShl(HighVal,
                                llvm::ConstantInt::get(EltTy, LowBits));
    Val = Builder.CreateOr(Val, HighVal, "bf.val");
  }

  // Sign extend if necessary.
  if (LV.isBitfieldSigned()) {
    llvm::Value *ExtraBits = llvm::ConstantInt::get(EltTy,
                                                    EltTySize - BitfieldSize);
    Val = Builder.CreateAShr(Builder.CreateShl(Val, ExtraBits),
                             ExtraBits, "bf.val.sext");
  }

  // The bitfield type and the normal type differ when the storage sizes
  // differ (currently just _Bool).
  Val = Builder.CreateIntCast(Val, ConvertType(ExprType), false, "tmp");

  return RValue::get(Val);
}

RValue CodeGenFunction::EmitLoadOfPropertyRefLValue(LValue LV,
                                                    QualType ExprType) {
  return EmitObjCPropertyGet(LV.getPropertyRefExpr());
}

RValue CodeGenFunction::EmitLoadOfKVCRefLValue(LValue LV,
                                               QualType ExprType) {
  return EmitObjCPropertyGet(LV.getKVCRefExpr());
}

// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
                                                         QualType ExprType) {
  llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddr(),
                                        LV.isVolatileQualified(), "tmp");

  const llvm::Constant *Elts = LV.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be
  // extracting a single element.  Just codegen as an extractelement.
  const VectorType *ExprVT = ExprType->getAsVectorType();
  if (!ExprVT) {
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(llvm::Type::Int32Ty, InIdx);
    return RValue::get(Builder.CreateExtractElement(Vec, Elt, "tmp"));
  }

  // Always use shuffle vector to try to retain the original program structure.
  unsigned NumResultElts = ExprVT->getNumElements();

  llvm::SmallVector<llvm::Constant*, 4> Mask;
  for (unsigned i = 0; i != NumResultElts; ++i) {
    unsigned InIdx = getAccessedFieldNo(i, Elts);
    Mask.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, InIdx));
  }

  llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
  Vec = Builder.CreateShuffleVector(Vec,
                                    llvm::UndefValue::get(Vec->getType()),
                                    MaskV, "tmp");
  return RValue::get(Vec);
}



/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
                                             QualType Ty) {
  if (!Dst.isSimple()) {
    if (Dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element.
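      // For example, 'v[2] = x' on a vector lvalue becomes: load the whole
      // vector, insertelement the new scalar at index 2, and store the vector
      // back, which is exactly the three statements below.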
      llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddr(),
                                            Dst.isVolatileQualified(), "tmp");
      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
                                        Dst.getVectorIdx(), "vecins");
      Builder.CreateStore(Vec, Dst.getVectorAddr(), Dst.isVolatileQualified());
      return;
    }

    // If this is an update of extended vector elements, insert them as
    // appropriate.
    if (Dst.isExtVectorElt())
      return EmitStoreThroughExtVectorComponentLValue(Src, Dst, Ty);

    if (Dst.isBitfield())
      return EmitStoreThroughBitfieldLValue(Src, Dst, Ty);

    if (Dst.isPropertyRef())
      return EmitStoreThroughPropertyRefLValue(Src, Dst, Ty);

    if (Dst.isKVCRef())
      return EmitStoreThroughKVCRefLValue(Src, Dst, Ty);

    assert(0 && "Unknown LValue type");
  }

  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // store into a __weak object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
    return;
  }

  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // store into a __strong object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
#if 0
    // FIXME: We cannot positively determine if we have an
    // 'ivar' assignment, object assignment or an unknown
    // assignment. For now, generate a call to objc_assign_strongCast,
    // which is a safe, but conservative, assumption.
    if (Dst.isObjCIvar())
      CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, LvalueDst);
    else
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst);
#endif
    CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
    return;
  }

  assert(Src.isScalar() && "Can't emit an agg store with this method");
  EmitStoreOfScalar(Src.getScalarVal(), Dst.getAddress(),
                    Dst.isVolatileQualified());
}

void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                                     QualType Ty,
                                                     llvm::Value **Result) {
  unsigned StartBit = Dst.getBitfieldStartBit();
  unsigned BitfieldSize = Dst.getBitfieldSize();
  llvm::Value *Ptr = Dst.getBitfieldAddr();

  const llvm::Type *EltTy =
    cast<llvm::PointerType>(Ptr->getType())->getElementType();
  unsigned EltTySize = CGM.getTargetData().getTypeSizeInBits(EltTy);

  // Get the new value, cast to the appropriate type and masked to
  // exactly the size of the bit-field.
  llvm::Value *SrcVal = Src.getScalarVal();
  llvm::Value *NewVal = Builder.CreateIntCast(SrcVal, EltTy, false, "tmp");
  llvm::Constant *Mask =
    llvm::ConstantInt::get(llvm::APInt::getLowBitsSet(EltTySize, BitfieldSize));
  NewVal = Builder.CreateAnd(NewVal, Mask, "bf.value");

  // Return the new value of the bit-field, if requested.
  if (Result) {
    // Cast back to the proper type for result.
    const llvm::Type *SrcTy = SrcVal->getType();
    llvm::Value *SrcTrunc = Builder.CreateIntCast(NewVal, SrcTy, false,
                                                  "bf.reload.val");

    // Sign extend if necessary.
    if (Dst.isBitfieldSigned()) {
      unsigned SrcTySize = CGM.getTargetData().getTypeSizeInBits(SrcTy);
      llvm::Value *ExtraBits = llvm::ConstantInt::get(SrcTy,
                                                      SrcTySize - BitfieldSize);
      SrcTrunc = Builder.CreateAShr(Builder.CreateShl(SrcTrunc, ExtraBits),
                                    ExtraBits, "bf.reload.sext");
    }

    *Result = SrcTrunc;
  }

  // In some cases the bitfield may straddle two memory locations.
  // Emit the low part first and check to see if the high needs to be
  // done.
  unsigned LowBits = std::min(BitfieldSize, EltTySize - StartBit);
  llvm::Value *LowVal = Builder.CreateLoad(Ptr, Dst.isVolatileQualified(),
                                           "bf.prev.low");

  // Compute the mask for zero-ing the low part of this bitfield.
  llvm::Constant *InvMask =
    llvm::ConstantInt::get(~llvm::APInt::getBitsSet(EltTySize, StartBit,
                                                    StartBit + LowBits));

  // Compute the new low part as
  //   LowVal = (LowVal & InvMask) | (NewVal << StartBit),
  // with the shift of NewVal implicitly stripping the high bits.
  llvm::Value *NewLowVal =
    Builder.CreateShl(NewVal, llvm::ConstantInt::get(EltTy, StartBit),
                      "bf.value.lo");
  LowVal = Builder.CreateAnd(LowVal, InvMask, "bf.prev.lo.cleared");
  LowVal = Builder.CreateOr(LowVal, NewLowVal, "bf.new.lo");

  // Write back.
  Builder.CreateStore(LowVal, Ptr, Dst.isVolatileQualified());

  // If the low part doesn't cover the bitfield emit a high part.
  if (LowBits < BitfieldSize) {
    unsigned HighBits = BitfieldSize - LowBits;
    llvm::Value *HighPtr =
      Builder.CreateGEP(Ptr, llvm::ConstantInt::get(llvm::Type::Int32Ty, 1),
                        "bf.ptr.hi");
    llvm::Value *HighVal = Builder.CreateLoad(HighPtr,
                                              Dst.isVolatileQualified(),
                                              "bf.prev.hi");

    // Compute the mask for zero-ing the high part of this bitfield.
    llvm::Constant *InvMask =
      llvm::ConstantInt::get(~llvm::APInt::getLowBitsSet(EltTySize, HighBits));

    // Compute the new high part as
    //   HighVal = (HighVal & InvMask) | (NewVal lshr LowBits),
    // where the high bits of NewVal have already been cleared and the
    // shift stripping the low bits.
    llvm::Value *NewHighVal =
      Builder.CreateLShr(NewVal, llvm::ConstantInt::get(EltTy, LowBits),
                         "bf.value.high");
    HighVal = Builder.CreateAnd(HighVal, InvMask, "bf.prev.hi.cleared");
    HighVal = Builder.CreateOr(HighVal, NewHighVal, "bf.new.hi");

    // Write back.
    Builder.CreateStore(HighVal, HighPtr, Dst.isVolatileQualified());
  }
}

void CodeGenFunction::EmitStoreThroughPropertyRefLValue(RValue Src,
                                                        LValue Dst,
                                                        QualType Ty) {
  EmitObjCPropertySet(Dst.getPropertyRefExpr(), Src);
}

void CodeGenFunction::EmitStoreThroughKVCRefLValue(RValue Src,
                                                   LValue Dst,
                                                   QualType Ty) {
  EmitObjCPropertySet(Dst.getKVCRefExpr(), Src);
}

void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
                                                               LValue Dst,
                                                               QualType Ty) {
  // This access turns into a read/modify/write of the vector.  Load the input
  // value now.
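  // Worked example (illustrative): for 'V.yz = W' with a 4-element V and a
  // 2-element W, the accessed fields are {1, 2}; W is first widened to four
  // elements with undefs, then shuffled into V with the mask {0, 4, 5, 3},
  // replacing elements 1 and 2 of V while keeping elements 0 and 3.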
  llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddr(),
                                        Dst.isVolatileQualified(), "tmp");
  const llvm::Constant *Elts = Dst.getExtVectorElts();

  llvm::Value *SrcVal = Src.getScalarVal();

  if (const VectorType *VTy = Ty->getAsVectorType()) {
    unsigned NumSrcElts = VTy->getNumElements();
    unsigned NumDstElts =
      cast<llvm::VectorType>(Vec->getType())->getNumElements();
    if (NumDstElts == NumSrcElts) {
      // Use shufflevector if the source and destination have the same number
      // of elements.
      llvm::SmallVector<llvm::Constant*, 4> Mask;
      for (unsigned i = 0; i != NumSrcElts; ++i) {
        unsigned InIdx = getAccessedFieldNo(i, Elts);
        Mask.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, InIdx));
      }

      llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
      Vec = Builder.CreateShuffleVector(SrcVal,
                                        llvm::UndefValue::get(Vec->getType()),
                                        MaskV, "tmp");
    } else if (NumDstElts > NumSrcElts) {
      // Extend the source vector to the same length and then shuffle it
      // into the destination.
      // FIXME: since we're shuffling with undef, can we just use the indices
      // into that?  This could be simpler.
      llvm::SmallVector<llvm::Constant*, 4> ExtMask;
      unsigned i;
      for (i = 0; i != NumSrcElts; ++i)
        ExtMask.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, i));
      for (; i != NumDstElts; ++i)
        ExtMask.push_back(llvm::UndefValue::get(llvm::Type::Int32Ty));
      llvm::Value *ExtMaskV = llvm::ConstantVector::get(&ExtMask[0],
                                                        ExtMask.size());
      llvm::Value *ExtSrcVal =
        Builder.CreateShuffleVector(SrcVal,
                                    llvm::UndefValue::get(SrcVal->getType()),
                                    ExtMaskV, "tmp");
      // Build the identity mask.
      llvm::SmallVector<llvm::Constant*, 4> Mask;
      for (unsigned i = 0; i != NumDstElts; ++i) {
        Mask.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, i));
      }
      // Then overwrite the entries that take their value from the shuffled-in
      // source.
      for (unsigned i = 0; i != NumSrcElts; ++i) {
        unsigned Idx = getAccessedFieldNo(i, Elts);
        Mask[Idx] = llvm::ConstantInt::get(llvm::Type::Int32Ty, i+NumDstElts);
      }
      llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
      Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV, "tmp");
    } else {
      // We should never shorten the vector.
      assert(0 && "unexpected shorten vector length");
    }
  } else {
    // If the Src is a scalar (not a vector) it must be updating one element.
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(llvm::Type::Int32Ty, InIdx);
    Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt, "tmp");
  }

  Builder.CreateStore(Vec, Dst.getExtVectorAddr(), Dst.isVolatileQualified());
}

LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
  const VarDecl *VD = dyn_cast<VarDecl>(E->getDecl());

  if (VD && (VD->isBlockVarDecl() || isa<ParmVarDecl>(VD) ||
             isa<ImplicitParamDecl>(VD))) {
    LValue LV;
    bool GCable = VD->hasLocalStorage() && !VD->hasAttr<BlocksAttr>();
    if (VD->hasExternalStorage()) {
      LV = LValue::MakeAddr(CGM.GetAddrOfGlobalVar(VD),
                            E->getType().getCVRQualifiers(),
                            getContext().getObjCGCAttrKind(E->getType()));
    } else {
      llvm::Value *V = LocalDeclMap[VD];
      assert(V && "DeclRefExpr not entered in LocalDeclMap?");
      // Local variables do not get their gc attribute set.
      QualType::GCAttrTypes attr = QualType::GCNone;
      // local static?
      if (!GCable)
        attr = getContext().getObjCGCAttrKind(E->getType());
      if (VD->hasAttr<BlocksAttr>()) {
        bool needsCopyDispose = BlockRequiresCopying(VD->getType());
        const llvm::Type *PtrStructTy = V->getType();
        const llvm::Type *Ty = PtrStructTy;
        Ty = llvm::PointerType::get(Ty, 0);
        V = Builder.CreateStructGEP(V, 1, "forwarding");
        V = Builder.CreateBitCast(V, Ty);
        V = Builder.CreateLoad(V, false);
        V = Builder.CreateBitCast(V, PtrStructTy);
        V = Builder.CreateStructGEP(V, needsCopyDispose*2 + 4, "x");
      }
      LV = LValue::MakeAddr(V, E->getType().getCVRQualifiers(), attr);
    }
    LValue::SetObjCNonGC(LV, GCable);
    return LV;
  } else if (VD && VD->isFileVarDecl()) {
    LValue LV = LValue::MakeAddr(CGM.GetAddrOfGlobalVar(VD),
                                 E->getType().getCVRQualifiers(),
                                 getContext().getObjCGCAttrKind(E->getType()));
    return LV;
  } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(E->getDecl())) {
    return LValue::MakeAddr(CGM.GetAddrOfFunction(FD),
                            E->getType().getCVRQualifiers(),
                            getContext().getObjCGCAttrKind(E->getType()));
  } else if (const ImplicitParamDecl *IPD =
               dyn_cast<ImplicitParamDecl>(E->getDecl())) {
    llvm::Value *V = LocalDeclMap[IPD];
    assert(V && "BlockVarDecl not entered in LocalDeclMap?");
    return LValue::MakeAddr(V, E->getType().getCVRQualifiers(),
                            getContext().getObjCGCAttrKind(E->getType()));
  }
  assert(0 && "Unimp declref");
  // Return an invalid LValue; the assert above ensures that this point is
  // never reached.
  return LValue();
}

LValue CodeGenFunction::EmitBlockDeclRefLValue(const BlockDeclRefExpr *E) {
  return LValue::MakeAddr(GetAddrOfBlockDecl(E), 0);
}

LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
  // __extension__ doesn't affect lvalue-ness.
  if (E->getOpcode() == UnaryOperator::Extension)
    return EmitLValue(E->getSubExpr());

  QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
  switch (E->getOpcode()) {
  default: assert(0 && "Unknown unary operator lvalue!");
  case UnaryOperator::Deref:
    {
      QualType T =
        E->getSubExpr()->getType()->getAsPointerType()->getPointeeType();
      LValue LV = LValue::MakeAddr(EmitScalarExpr(E->getSubExpr()),
                                   ExprTy->getAsPointerType()->getPointeeType()
                                          .getCVRQualifiers(),
                                   getContext().getObjCGCAttrKind(T));
      // We should not generate a __weak write barrier on an indirect reference
      // to a pointer to an object, as in: void foo(__weak id *param); *param = 0;
      // However, we do continue to generate a __strong write barrier on an
      // indirect write into a pointer to an object.
      if (getContext().getLangOptions().ObjC1 &&
          getContext().getLangOptions().getGCMode() != LangOptions::NonGC &&
          LV.isObjCWeak())
        LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate());
      return LV;
    }
  case UnaryOperator::Real:
  case UnaryOperator::Imag:
    LValue LV = EmitLValue(E->getSubExpr());
    unsigned Idx = E->getOpcode() == UnaryOperator::Imag;
    return LValue::MakeAddr(Builder.CreateStructGEP(LV.getAddress(),
                                                    Idx, "idx"),
                            ExprTy.getCVRQualifiers());
  }
}

LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
  return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromLiteral(E), 0);
}

LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
  return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromObjCEncode(E), 0);
}


LValue CodeGenFunction::EmitPredefinedFunctionName(unsigned Type) {
  std::string GlobalVarName;

  switch (Type) {
  default:
    assert(0 && "Invalid type");
  case PredefinedExpr::Func:
    GlobalVarName = "__func__.";
    break;
  case PredefinedExpr::Function:
    GlobalVarName = "__FUNCTION__.";
    break;
  case PredefinedExpr::PrettyFunction:
    // FIXME: Demangle C++ method names
    GlobalVarName = "__PRETTY_FUNCTION__.";
    break;
  }

  std::string FunctionName;
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CurFuncDecl)) {
    FunctionName = CGM.getMangledName(FD);
  } else {
    // Just get the mangled name, skipping the asm prefix if it
    // exists.
    FunctionName = CurFn->getName();
    if (FunctionName[0] == '\01')
      FunctionName = FunctionName.substr(1, std::string::npos);
  }

  GlobalVarName += FunctionName;
  llvm::Constant *C =
    CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str());
  return LValue::MakeAddr(C, 0);
}

LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
  switch (E->getIdentType()) {
  default:
    return EmitUnsupportedLValue(E, "predefined expression");
  case PredefinedExpr::Func:
  case PredefinedExpr::Function:
  case PredefinedExpr::PrettyFunction:
    return EmitPredefinedFunctionName(E->getIdentType());
  }
}

LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
  // The index must always be an integer, which is not an aggregate.  Emit it.
  llvm::Value *Idx = EmitScalarExpr(E->getIdx());

  // If the base is a vector type, then we are forming a vector element lvalue
  // with this subscript.
  if (E->getBase()->getType()->isVectorType()) {
    // Emit the vector as an lvalue to get its address.
    LValue LHS = EmitLValue(E->getBase());
    assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
    // FIXME: This should properly sign/zero/extend or truncate Idx to i32.
    return LValue::MakeVectorElt(LHS.getAddress(), Idx,
                                 E->getBase()->getType().getCVRQualifiers());
  }

  // The base must be a pointer, which is not an aggregate.  Emit it.
  llvm::Value *Base = EmitScalarExpr(E->getBase());

  // Extend or truncate the index type to 32 or 64 bits.
  QualType IdxTy = E->getIdx()->getType();
  bool IdxSigned = IdxTy->isSignedIntegerType();
  unsigned IdxBitwidth = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();

  // If the pointer width is less than 32, then extend the index to 32 bits.
  unsigned IdxValidWidth = (LLVMPointerWidth < 32) ? 32 : LLVMPointerWidth;
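  // For instance (illustrative), a 'short' index on a 64-bit target is
  // sign-extended from i16 to i64 here before being fed to the GEP below.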
  if (IdxBitwidth != IdxValidWidth)
    Idx = Builder.CreateIntCast(Idx, llvm::IntegerType::get(IdxValidWidth),
                                IdxSigned, "idxprom");

  // We know that the pointer points to a type of the correct size, unless the
  // size is a VLA.
  if (const VariableArrayType *VAT =
        getContext().getAsVariableArrayType(E->getType())) {
    llvm::Value *VLASize = VLASizeMap[VAT];

    Idx = Builder.CreateMul(Idx, VLASize);

    QualType BaseType = getContext().getBaseElementType(VAT);

    uint64_t BaseTypeSize = getContext().getTypeSize(BaseType) / 8;
    Idx = Builder.CreateUDiv(Idx,
                             llvm::ConstantInt::get(Idx->getType(),
                                                    BaseTypeSize));
  }

  QualType T = E->getBase()->getType();
  QualType ExprTy = getContext().getCanonicalType(T);
  T = T->getAsPointerType()->getPointeeType();
  LValue LV =
    LValue::MakeAddr(Builder.CreateGEP(Base, Idx, "arrayidx"),
                     ExprTy->getAsPointerType()->getPointeeType().getCVRQualifiers(),
                     getContext().getObjCGCAttrKind(T));
  if (getContext().getLangOptions().ObjC1 &&
      getContext().getLangOptions().getGCMode() != LangOptions::NonGC)
    LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate());
  return LV;
}

static
llvm::Constant *GenerateConstantVector(llvm::SmallVector<unsigned, 4> &Elts) {
  llvm::SmallVector<llvm::Constant *, 4> CElts;

  for (unsigned i = 0, e = Elts.size(); i != e; ++i)
    CElts.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, Elts[i]));

  return llvm::ConstantVector::get(&CElts[0], CElts.size());
}

LValue CodeGenFunction::
EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
  // Emit the base vector as an l-value.
  LValue Base;

  // ExtVectorElementExpr's base can either be a vector or pointer to vector.
  if (!E->isArrow()) {
    assert(E->getBase()->getType()->isVectorType());
    Base = EmitLValue(E->getBase());
  } else {
    const PointerType *PT = E->getBase()->getType()->getAsPointerType();
    llvm::Value *Ptr = EmitScalarExpr(E->getBase());
    Base = LValue::MakeAddr(Ptr, PT->getPointeeType().getCVRQualifiers());
  }

  // Encode the element access list into a vector of unsigned indices.
  llvm::SmallVector<unsigned, 4> Indices;
  E->getEncodedElementAccess(Indices);

  if (Base.isSimple()) {
    llvm::Constant *CV = GenerateConstantVector(Indices);
    return LValue::MakeExtVectorElt(Base.getAddress(), CV,
                                    Base.getQualifiers());
  }
  assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");

  llvm::Constant *BaseElts = Base.getExtVectorElts();
  llvm::SmallVector<llvm::Constant *, 4> CElts;

  for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
    if (isa<llvm::ConstantAggregateZero>(BaseElts))
      CElts.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, 0));
    else
      CElts.push_back(BaseElts->getOperand(Indices[i]));
  }
  llvm::Constant *CV = llvm::ConstantVector::get(&CElts[0], CElts.size());
  return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV,
                                  Base.getQualifiers());
}

LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
  bool isUnion = false;
  bool isIvar = false;
  bool isNonGC = false;
  Expr *BaseExpr = E->getBase();
  llvm::Value *BaseValue = NULL;
  unsigned CVRQualifiers = 0;

  // If this is s.x, emit s as an lvalue.  If it is s->x, emit s as a scalar.
  if (E->isArrow()) {
    BaseValue = EmitScalarExpr(BaseExpr);
    const PointerType *PTy =
      cast<PointerType>(getContext().getCanonicalType(BaseExpr->getType()));
    if (PTy->getPointeeType()->isUnionType())
      isUnion = true;
    CVRQualifiers = PTy->getPointeeType().getCVRQualifiers();
  } else if (isa<ObjCPropertyRefExpr>(BaseExpr) ||
             isa<ObjCKVCRefExpr>(BaseExpr)) {
    RValue RV = EmitObjCPropertyGet(BaseExpr);
    BaseValue = RV.getAggregateAddr();
    if (BaseExpr->getType()->isUnionType())
      isUnion = true;
    CVRQualifiers = BaseExpr->getType().getCVRQualifiers();
  } else {
    LValue BaseLV = EmitLValue(BaseExpr);
    if (BaseLV.isObjCIvar())
      isIvar = true;
    if (BaseLV.isNonGC())
      isNonGC = true;
    // FIXME: this isn't right for bitfields.
    BaseValue = BaseLV.getAddress();
    if (BaseExpr->getType()->isUnionType())
      isUnion = true;
    CVRQualifiers = BaseExpr->getType().getCVRQualifiers();
  }

  FieldDecl *Field = dyn_cast<FieldDecl>(E->getMemberDecl());
  // FIXME: Handle non-field member expressions
  assert(Field && "No code generation for non-field member references");
  LValue MemExpLV = EmitLValueForField(BaseValue, Field, isUnion,
                                       CVRQualifiers);
  LValue::SetObjCIvar(MemExpLV, isIvar);
  LValue::SetObjCNonGC(MemExpLV, isNonGC);
  return MemExpLV;
}

LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value* BaseValue,
                                              FieldDecl* Field,
                                              unsigned CVRQualifiers) {
  unsigned idx = CGM.getTypes().getLLVMFieldNo(Field);
  // FIXME: CodeGenTypes should expose a method to get the appropriate
  // type for FieldTy (the appropriate type is ABI-dependent).
  const llvm::Type *FieldTy =
    CGM.getTypes().ConvertTypeForMem(Field->getType());
  const llvm::PointerType *BaseTy =
    cast<llvm::PointerType>(BaseValue->getType());
  unsigned AS = BaseTy->getAddressSpace();
  BaseValue = Builder.CreateBitCast(BaseValue,
                                    llvm::PointerType::get(FieldTy, AS),
                                    "tmp");
  llvm::Value *V = Builder.CreateGEP(BaseValue,
                                     llvm::ConstantInt::get(llvm::Type::Int32Ty, idx),
                                     "tmp");

  CodeGenTypes::BitFieldInfo bitFieldInfo =
    CGM.getTypes().getBitFieldInfo(Field);
  return LValue::MakeBitfield(V, bitFieldInfo.Begin, bitFieldInfo.Size,
                              Field->getType()->isSignedIntegerType(),
                              Field->getType().getCVRQualifiers()|CVRQualifiers);
}

LValue CodeGenFunction::EmitLValueForField(llvm::Value* BaseValue,
                                           FieldDecl* Field,
                                           bool isUnion,
                                           unsigned CVRQualifiers) {
  if (Field->isBitField())
    return EmitLValueForBitfield(BaseValue, Field, CVRQualifiers);

  unsigned idx = CGM.getTypes().getLLVMFieldNo(Field);
  llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx, "tmp");

  // Match union field type.
  if (isUnion) {
    const llvm::Type *FieldTy =
      CGM.getTypes().ConvertTypeForMem(Field->getType());
    const llvm::PointerType *BaseTy =
      cast<llvm::PointerType>(BaseValue->getType());
    unsigned AS = BaseTy->getAddressSpace();
    V = Builder.CreateBitCast(V,
                              llvm::PointerType::get(FieldTy, AS),
                              "tmp");
  }

  QualType::GCAttrTypes attr = QualType::GCNone;
  if (CGM.getLangOptions().ObjC1 &&
      CGM.getLangOptions().getGCMode() != LangOptions::NonGC) {
    QualType Ty = Field->getType();
    attr = Ty.getObjCGCAttr();
    if (attr != QualType::GCNone) {
      // __weak attribute on a field is ignored.
      if (attr == QualType::Weak)
        attr = QualType::GCNone;
    } else if (getContext().isObjCObjectPointerType(Ty))
      attr = QualType::Strong;
  }
  LValue LV =
    LValue::MakeAddr(V,
                     Field->getType().getCVRQualifiers()|CVRQualifiers,
                     attr);
  return LV;
}

LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr* E) {
  const llvm::Type *LTy = ConvertType(E->getType());
  llvm::Value *DeclPtr = CreateTempAlloca(LTy, ".compoundliteral");

  const Expr* InitExpr = E->getInitializer();
  LValue Result = LValue::MakeAddr(DeclPtr, E->getType().getCVRQualifiers());

  if (E->getType()->isComplexType()) {
    EmitComplexExprIntoAddr(InitExpr, DeclPtr, false);
  } else if (hasAggregateLLVMType(E->getType())) {
    EmitAnyExpr(InitExpr, DeclPtr, false);
  } else {
    EmitStoreThroughLValue(EmitAnyExpr(InitExpr), Result, E->getType());
  }

  return Result;
}

LValue CodeGenFunction::EmitConditionalOperator(const ConditionalOperator* E) {
  // We don't handle vectors yet.
  if (E->getType()->isVectorType())
    return EmitUnsupportedLValue(E, "conditional operator");

  // The ?: here should be an aggregate.
  assert((hasAggregateLLVMType(E->getType()) &&
          !E->getType()->isAnyComplexType()) &&
         "Unexpected conditional operator!");

  llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
  EmitAggExpr(E, Temp, false);

  return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers(),
                          getContext().getObjCGCAttrKind(E->getType()));
}

/// EmitCastLValue - Casts are never lvalues.  If a cast is needed by the code
/// generator in an lvalue context, then it must mean that we need the address
/// of an aggregate in order to access one of its fields.  This can happen for
/// all the reasons that casts are permitted with an aggregate result,
/// including noop aggregate casts and casts from scalar to union.
LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
  // If this is an aggregate-to-aggregate cast, just use the input's address as
  // the lvalue.
  if (getContext().hasSameUnqualifiedType(E->getType(),
                                          E->getSubExpr()->getType()))
    return EmitLValue(E->getSubExpr());

  // Otherwise, we must have a cast from scalar to union.
  assert(E->getType()->isUnionType() && "Expected scalar-to-union cast");

  // Casts are only lvalues when the source and destination types are the same.
  llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
  EmitAnyExpr(E->getSubExpr(), Temp, false);

  return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers(),
                          getContext().getObjCGCAttrKind(E->getType()));
}

//===--------------------------------------------------------------------===//
//                             Expression Emission
//===--------------------------------------------------------------------===//


RValue CodeGenFunction::EmitCallExpr(const CallExpr *E) {
  // Builtins never have block type.
  if (E->getCallee()->getType()->isBlockPointerType())
    return EmitBlockCallExpr(E);

  if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E))
    return EmitCXXMemberCallExpr(CE);

  const Decl *TargetDecl = 0;
  if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E->getCallee())) {
    if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) {
      TargetDecl = DRE->getDecl();
      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(TargetDecl))
        if (unsigned builtinID = FD->getBuiltinID(getContext()))
          return EmitBuiltinExpr(FD, builtinID, E);
    }
  }

  llvm::Value *Callee = EmitScalarExpr(E->getCallee());
  return EmitCallExpr(Callee, E->getCallee()->getType(),
                      E->arg_begin(), E->arg_end(), TargetDecl);
}

LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
  // Can only get an l-value for binary operator expressions which are a
  // simple assignment of aggregate type.
  if (E->getOpcode() != BinaryOperator::Assign)
    return EmitUnsupportedLValue(E, "binary l-value expression");

  llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
  EmitAggExpr(E, Temp, false);
  // FIXME: Are these qualifiers correct?
  return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers(),
                          getContext().getObjCGCAttrKind(E->getType()));
}

LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
  // Can only get an l-value for a call expression returning aggregate type.
  RValue RV = EmitCallExpr(E);
  return LValue::MakeAddr(RV.getAggregateAddr(),
                          E->getType().getCVRQualifiers(),
                          getContext().getObjCGCAttrKind(E->getType()));
}

LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
  // FIXME: This shouldn't require another copy.
  llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
  EmitAggExpr(E, Temp, false);
  return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers());
}

LValue
CodeGenFunction::EmitCXXConditionDeclLValue(const CXXConditionDeclExpr *E) {
  EmitLocalBlockVarDecl(*E->getVarDecl());
  return EmitDeclRefLValue(E);
}

LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
  // Can only get an l-value for a message expression returning aggregate type.
  RValue RV = EmitObjCMessageExpr(E);
  // FIXME: can this be volatile?
  return LValue::MakeAddr(RV.getAggregateAddr(),
                          E->getType().getCVRQualifiers(),
                          getContext().getObjCGCAttrKind(E->getType()));
}

llvm::Value *CodeGenFunction::EmitIvarOffset(ObjCInterfaceDecl *Interface,
                                             const ObjCIvarDecl *Ivar) {
  // Objective-C objects are traditionally C structures with their layout
  // defined at compile-time.  In some implementations, their layout is not
  // defined until run time in order to allow instance variables to be added to
  // a class without recompiling all of the subclasses.  If this is the case
  // then the CGObjCRuntime subclass must return true from LateBoundIVars() and
  // implement the lookup itself.
  if (CGM.getObjCRuntime().LateBoundIVars())
    assert(0 && "late-bound ivars are unsupported");
  return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
}

LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
                                          llvm::Value *BaseValue,
                                          const ObjCIvarDecl *Ivar,
                                          const FieldDecl *Field,
                                          unsigned CVRQualifiers) {
  // See comment in EmitIvarOffset.
  if (CGM.getObjCRuntime().LateBoundIVars())
    assert(0 && "late-bound ivars are unsupported");

  LValue LV = CGM.getObjCRuntime().EmitObjCValueForIvar(*this,
                                                        ObjectTy,
                                                        BaseValue, Ivar, Field,
                                                        CVRQualifiers);
  return LV;
}

LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
  // FIXME: A lot of the code below could be shared with EmitMemberExpr.
  llvm::Value *BaseValue = 0;
  const Expr *BaseExpr = E->getBase();
  unsigned CVRQualifiers = 0;
  QualType ObjectTy;
  if (E->isArrow()) {
    BaseValue = EmitScalarExpr(BaseExpr);
    const PointerType *PTy =
      cast<PointerType>(getContext().getCanonicalType(BaseExpr->getType()));
    ObjectTy = PTy->getPointeeType();
    CVRQualifiers = ObjectTy.getCVRQualifiers();
  } else {
    LValue BaseLV = EmitLValue(BaseExpr);
    // FIXME: this isn't right for bitfields.
    BaseValue = BaseLV.getAddress();
    ObjectTy = BaseExpr->getType();
    CVRQualifiers = ObjectTy.getCVRQualifiers();
  }

  return EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
                           getContext().getFieldDecl(E), CVRQualifiers);
}

LValue
CodeGenFunction::EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E) {
  // This is a special l-value that just issues sends when we load or
  // store through it.
  return LValue::MakePropertyRef(E, E->getType().getCVRQualifiers());
}

LValue
CodeGenFunction::EmitObjCKVCRefLValue(const ObjCKVCRefExpr *E) {
  // This is a special l-value that just issues sends when we load or
  // store through it.
  return LValue::MakeKVCRef(E, E->getType().getCVRQualifiers());
}

LValue
CodeGenFunction::EmitObjCSuperExpr(const ObjCSuperExpr *E) {
  return EmitUnsupportedLValue(E, "use of super");
}

RValue CodeGenFunction::EmitCallExpr(llvm::Value *Callee, QualType CalleeType,
                                     CallExpr::const_arg_iterator ArgBeg,
                                     CallExpr::const_arg_iterator ArgEnd,
                                     const Decl *TargetDecl) {
  // Get the actual function type.  The callee type will always be a
  // pointer to function type or a block pointer type.
  assert(CalleeType->isFunctionPointerType() &&
         "Call must have function pointer type!");

  QualType FnType = CalleeType->getAsPointerType()->getPointeeType();
  QualType ResultType = FnType->getAsFunctionType()->getResultType();

  CallArgList Args;
  EmitCallArgs(Args, FnType->getAsFunctionProtoType(), ArgBeg, ArgEnd);

  return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args),
                  Callee, Args, TargetDecl);
}