CGExpr.cpp revision 0f294632f36459174199b77699e339715244b5ab
//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGCall.h"
#include "CGObjCRuntime.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(const llvm::Type *Ty,
                                                    const char *Name) {
  if (!Builder.isNamePreserving())
    Name = "";
  return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
}

/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  QualType BoolTy = getContext().BoolTy;
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),BoolTy);
}

/// EmitAnyExpr - Emit code to compute the specified expression which can have
/// any type. The result is returned as an RValue struct. If this is an
/// aggregate expression, the aggloc/agglocvolatile arguments indicate where
/// the result should be returned.
RValue CodeGenFunction::EmitAnyExpr(const Expr *E, llvm::Value *AggLoc,
                                    bool isAggLocVolatile, bool IgnoreResult) {
  if (!hasAggregateLLVMType(E->getType()))
    return RValue::get(EmitScalarExpr(E));
  else if (E->getType()->isAnyComplexType())
    return RValue::getComplex(EmitComplexExpr(E));

  EmitAggExpr(E, AggLoc, isAggLocVolatile, IgnoreResult);
  return RValue::getAggregate(AggLoc, isAggLocVolatile);
}

/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result
/// will always be accessible even if no aggregate location is
/// provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E, llvm::Value *AggLoc,
                                          bool isAggLocVolatile) {
  if (!AggLoc && hasAggregateLLVMType(E->getType()) &&
      !E->getType()->isAnyComplexType())
    AggLoc = CreateTempAlloca(ConvertType(E->getType()), "agg.tmp");
  return EmitAnyExpr(E, AggLoc, isAggLocVolatile);
}

RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
                                                   QualType DestType) {
  RValue Val;
  if (E->isLvalue(getContext()) == Expr::LV_Valid) {
    // Emit the expr as an lvalue.
    LValue LV = EmitLValue(E);
    if (LV.isSimple())
      return RValue::get(LV.getAddress());
    Val = EmitLoadOfLValue(LV, E->getType());
  } else {
    Val = EmitAnyExprToTemp(E);
  }

  if (Val.isAggregate()) {
    Val = RValue::get(Val.getAggregateAddr());
  } else {
    // Create a temporary variable that we can bind the reference to.
    llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(E->getType()),
                                         "reftmp");
    if (Val.isScalar())
      EmitStoreOfScalar(Val.getScalarVal(), Temp, false, E->getType());
    else
      StoreComplexToAddr(Val.getComplexVal(), Temp, false);
    Val = RValue::get(Temp);
  }

  return Val;
}


/// getAccessedFieldNo - Given an encoded value and a result number, return
/// the input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  if (isa<llvm::ConstantAggregateZero>(Elts))
    return 0;

  return cast<llvm::ConstantInt>(Elts->getOperand(Idx))->getZExtValue();
}


//===----------------------------------------------------------------------===//
//                         LValue Expression Emission
//===----------------------------------------------------------------------===//

RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType()) {
    return RValue::get(0);
  } else if (const ComplexType *CTy = Ty->getAsComplexType()) {
    const llvm::Type *EltTy = ConvertType(CTy->getElementType());
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  } else if (hasAggregateLLVMType(Ty)) {
    const llvm::Type *LTy = llvm::PointerType::getUnqual(ConvertType(Ty));
    return RValue::getAggregate(llvm::UndefValue::get(LTy));
  } else {
    return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
  }
}

RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}

LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
  return LValue::MakeAddr(llvm::UndefValue::get(Ty),
                          E->getType().getCVRQualifiers(),
                          getContext().getObjCGCAttrKind(E->getType()));
}

/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield
/// reference. In either case, the LLVM Value* in the LValue structure is
/// guaranteed to be an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of
/// the LLVM value is known: For example, it may not be a pointer to an
/// integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed
/// size, this method guarantees that the returned pointer type will point to
/// an LLVM type of the same size of the lvalue's type. If the lvalue has a
/// variable length type, this is not possible.
163/// 164LValue CodeGenFunction::EmitLValue(const Expr *E) { 165 switch (E->getStmtClass()) { 166 default: return EmitUnsupportedLValue(E, "l-value expression"); 167 168 case Expr::BinaryOperatorClass: 169 return EmitBinaryOperatorLValue(cast<BinaryOperator>(E)); 170 case Expr::CallExprClass: 171 case Expr::CXXOperatorCallExprClass: 172 return EmitCallExprLValue(cast<CallExpr>(E)); 173 case Expr::VAArgExprClass: 174 return EmitVAArgExprLValue(cast<VAArgExpr>(E)); 175 case Expr::DeclRefExprClass: 176 case Expr::QualifiedDeclRefExprClass: 177 return EmitDeclRefLValue(cast<DeclRefExpr>(E)); 178 case Expr::ParenExprClass:return EmitLValue(cast<ParenExpr>(E)->getSubExpr()); 179 case Expr::PredefinedExprClass: 180 return EmitPredefinedLValue(cast<PredefinedExpr>(E)); 181 case Expr::StringLiteralClass: 182 return EmitStringLiteralLValue(cast<StringLiteral>(E)); 183 case Expr::ObjCEncodeExprClass: 184 return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E)); 185 186 case Expr::BlockDeclRefExprClass: 187 return EmitBlockDeclRefLValue(cast<BlockDeclRefExpr>(E)); 188 189 case Expr::CXXConditionDeclExprClass: 190 return EmitCXXConditionDeclLValue(cast<CXXConditionDeclExpr>(E)); 191 192 case Expr::ObjCMessageExprClass: 193 return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E)); 194 case Expr::ObjCIvarRefExprClass: 195 return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E)); 196 case Expr::ObjCPropertyRefExprClass: 197 return EmitObjCPropertyRefLValue(cast<ObjCPropertyRefExpr>(E)); 198 case Expr::ObjCKVCRefExprClass: 199 return EmitObjCKVCRefLValue(cast<ObjCKVCRefExpr>(E)); 200 case Expr::ObjCSuperExprClass: 201 return EmitObjCSuperExprLValue(cast<ObjCSuperExpr>(E)); 202 203 case Expr::StmtExprClass: 204 return EmitStmtExprLValue(cast<StmtExpr>(E)); 205 case Expr::UnaryOperatorClass: 206 return EmitUnaryOpLValue(cast<UnaryOperator>(E)); 207 case Expr::ArraySubscriptExprClass: 208 return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E)); 209 case Expr::ExtVectorElementExprClass: 210 return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E)); 211 case Expr::MemberExprClass: return EmitMemberExpr(cast<MemberExpr>(E)); 212 case Expr::CompoundLiteralExprClass: 213 return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E)); 214 case Expr::ConditionalOperatorClass: 215 return EmitConditionalOperator(cast<ConditionalOperator>(E)); 216 case Expr::ChooseExprClass: 217 return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext())); 218 case Expr::ImplicitCastExprClass: 219 case Expr::CStyleCastExprClass: 220 case Expr::CXXFunctionalCastExprClass: 221 case Expr::CXXStaticCastExprClass: 222 case Expr::CXXDynamicCastExprClass: 223 case Expr::CXXReinterpretCastExprClass: 224 case Expr::CXXConstCastExprClass: 225 return EmitCastLValue(cast<CastExpr>(E)); 226 } 227} 228 229llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile, 230 QualType Ty) { 231 llvm::Value *V = Builder.CreateLoad(Addr, Volatile, "tmp"); 232 233 // Bool can have different representation in memory than in registers. 234 if (Ty->isBooleanType()) 235 if (V->getType() != llvm::Type::Int1Ty) 236 V = Builder.CreateTrunc(V, llvm::Type::Int1Ty, "tobool"); 237 238 return V; 239} 240 241void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr, 242 bool Volatile, QualType Ty) { 243 244 if (Ty->isBooleanType()) { 245 // Bool can have different representation in memory than in registers. 
    const llvm::Type *SrcTy = Value->getType();
    const llvm::PointerType *DstPtr = cast<llvm::PointerType>(Addr->getType());
    if (DstPtr->getElementType() != SrcTy) {
      const llvm::Type *MemTy =
        llvm::PointerType::get(SrcTy, DstPtr->getAddressSpace());
      Addr = Builder.CreateBitCast(Addr, MemTy, "storetmp");
    }
  }

  Builder.CreateStore(Value, Addr, Volatile);
}

/// EmitLoadOfLValue - Given an expression that represents a value lvalue,
/// this method emits the address of the lvalue, then loads the result as an
/// rvalue, returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, QualType ExprType) {
  if (LV.isObjCWeak()) {
    // load of a __weak object.
    llvm::Value *AddrWeakObj = LV.getAddress();
    llvm::Value *read_weak = CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                                   AddrWeakObj);
    return RValue::get(read_weak);
  }

  if (LV.isSimple()) {
    llvm::Value *Ptr = LV.getAddress();
    const llvm::Type *EltTy =
      cast<llvm::PointerType>(Ptr->getType())->getElementType();

    // Simple scalar l-value.
    if (EltTy->isSingleValueType())
      return RValue::get(EmitLoadOfScalar(Ptr, LV.isVolatileQualified(),
                                          ExprType));

    assert(ExprType->isFunctionType() && "Unknown scalar value");
    return RValue::get(Ptr);
  }

  if (LV.isVectorElt()) {
    llvm::Value *Vec = Builder.CreateLoad(LV.getVectorAddr(),
                                          LV.isVolatileQualified(), "tmp");
    return RValue::get(Builder.CreateExtractElement(Vec, LV.getVectorIdx(),
                                                    "vecext"));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
  if (LV.isExtVectorElt())
    return EmitLoadOfExtVectorElementLValue(LV, ExprType);

  if (LV.isBitfield())
    return EmitLoadOfBitfieldLValue(LV, ExprType);

  if (LV.isPropertyRef())
    return EmitLoadOfPropertyRefLValue(LV, ExprType);

  assert(LV.isKVCRef() && "Unknown LValue type!");
  return EmitLoadOfKVCRefLValue(LV, ExprType);
}

RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
                                                 QualType ExprType) {
  unsigned StartBit = LV.getBitfieldStartBit();
  unsigned BitfieldSize = LV.getBitfieldSize();
  llvm::Value *Ptr = LV.getBitfieldAddr();

  const llvm::Type *EltTy =
    cast<llvm::PointerType>(Ptr->getType())->getElementType();
  unsigned EltTySize = CGM.getTargetData().getTypeSizeInBits(EltTy);

  // In some cases the bitfield may straddle two memory locations.
  // Currently we load the entire bitfield, then do the magic to
  // sign-extend it if necessary. This results in somewhat more code
  // than necessary for the common case (one load), since two shifts
  // accomplish both the masking and sign extension.
  unsigned LowBits = std::min(BitfieldSize, EltTySize - StartBit);
  llvm::Value *Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "tmp");

  // Shift to proper location.
  if (StartBit)
    Val = Builder.CreateLShr(Val, llvm::ConstantInt::get(EltTy, StartBit),
                             "bf.lo");

  // Mask off unused bits.
  llvm::Constant *LowMask =
    llvm::ConstantInt::get(llvm::APInt::getLowBitsSet(EltTySize, LowBits));
  Val = Builder.CreateAnd(Val, LowMask, "bf.lo.cleared");

  // Fetch the high bits if necessary.
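  // If the field spills past the end of this storage unit, the remaining bits
  // live at the start of the next unit: load that unit as well, mask off the
  // bits we need, and merge them in above the low part.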
  if (LowBits < BitfieldSize) {
    unsigned HighBits = BitfieldSize - LowBits;
    llvm::Value *HighPtr =
      Builder.CreateGEP(Ptr, llvm::ConstantInt::get(llvm::Type::Int32Ty, 1),
                        "bf.ptr.hi");
    llvm::Value *HighVal = Builder.CreateLoad(HighPtr,
                                              LV.isVolatileQualified(),
                                              "tmp");

    // Mask off unused bits.
    llvm::Constant *HighMask =
      llvm::ConstantInt::get(llvm::APInt::getLowBitsSet(EltTySize, HighBits));
    HighVal = Builder.CreateAnd(HighVal, HighMask, "bf.lo.cleared");

    // Shift to the proper location and or into the bitfield value.
    HighVal = Builder.CreateShl(HighVal,
                                llvm::ConstantInt::get(EltTy, LowBits));
    Val = Builder.CreateOr(Val, HighVal, "bf.val");
  }

  // Sign extend if necessary.
  if (LV.isBitfieldSigned()) {
    llvm::Value *ExtraBits = llvm::ConstantInt::get(EltTy,
                                                    EltTySize - BitfieldSize);
    Val = Builder.CreateAShr(Builder.CreateShl(Val, ExtraBits),
                             ExtraBits, "bf.val.sext");
  }

  // The bitfield type and the normal type differ when the storage sizes
  // differ (currently just _Bool).
  Val = Builder.CreateIntCast(Val, ConvertType(ExprType), false, "tmp");

  return RValue::get(Val);
}

RValue CodeGenFunction::EmitLoadOfPropertyRefLValue(LValue LV,
                                                    QualType ExprType) {
  return EmitObjCPropertyGet(LV.getPropertyRefExpr());
}

RValue CodeGenFunction::EmitLoadOfKVCRefLValue(LValue LV,
                                               QualType ExprType) {
  return EmitObjCPropertyGet(LV.getKVCRefExpr());
}

// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
                                                         QualType ExprType) {
  llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddr(),
                                        LV.isVolatileQualified(), "tmp");

  const llvm::Constant *Elts = LV.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be
  // extracting a single element. Just codegen as an extractelement.
  const VectorType *ExprVT = ExprType->getAsVectorType();
  if (!ExprVT) {
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(llvm::Type::Int32Ty, InIdx);
    return RValue::get(Builder.CreateExtractElement(Vec, Elt, "tmp"));
  }

  // Always use shuffle vector to try to retain the original program structure
  unsigned NumResultElts = ExprVT->getNumElements();

  llvm::SmallVector<llvm::Constant*, 4> Mask;
  for (unsigned i = 0; i != NumResultElts; ++i) {
    unsigned InIdx = getAccessedFieldNo(i, Elts);
    Mask.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, InIdx));
  }

  llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
  Vec = Builder.CreateShuffleVector(Vec,
                                    llvm::UndefValue::get(Vec->getType()),
                                    MaskV, "tmp");
  return RValue::get(Vec);
}


/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
                                             QualType Ty) {
  if (!Dst.isSimple()) {
    if (Dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element.
      llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddr(),
                                            Dst.isVolatileQualified(), "tmp");
      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
                                        Dst.getVectorIdx(), "vecins");
      Builder.CreateStore(Vec, Dst.getVectorAddr(), Dst.isVolatileQualified());
      return;
    }

    // If this is an update of extended vector elements, insert them as
    // appropriate.
    if (Dst.isExtVectorElt())
      return EmitStoreThroughExtVectorComponentLValue(Src, Dst, Ty);

    if (Dst.isBitfield())
      return EmitStoreThroughBitfieldLValue(Src, Dst, Ty);

    if (Dst.isPropertyRef())
      return EmitStoreThroughPropertyRefLValue(Src, Dst, Ty);

    if (Dst.isKVCRef())
      return EmitStoreThroughKVCRefLValue(Src, Dst, Ty);

    assert(0 && "Unknown LValue type");
  }

  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // store into a __weak object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
    return;
  }

  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // store into a __strong object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
#if 0
    // FIXME. We cannot positively determine if we have an 'ivar' assignment,
    // object assignment or an unknown assignment. For now, generate call to
    // objc_assign_strongCast assignment which is a safe, but conservative
    // assumption.
    if (Dst.isObjCIvar())
      CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, LvalueDst);
    else
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst);
#endif
    if (Dst.isGlobalObjCRef())
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst);
    else
      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
    return;
  }

  assert(Src.isScalar() && "Can't emit an agg store with this method");
  EmitStoreOfScalar(Src.getScalarVal(), Dst.getAddress(),
                    Dst.isVolatileQualified(), Ty);
}

void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                                     QualType Ty,
                                                     llvm::Value **Result) {
  unsigned StartBit = Dst.getBitfieldStartBit();
  unsigned BitfieldSize = Dst.getBitfieldSize();
  llvm::Value *Ptr = Dst.getBitfieldAddr();

  const llvm::Type *EltTy =
    cast<llvm::PointerType>(Ptr->getType())->getElementType();
  unsigned EltTySize = CGM.getTargetData().getTypeSizeInBits(EltTy);

  // Get the new value, cast to the appropriate type and masked to
  // exactly the size of the bit-field.
  llvm::Value *SrcVal = Src.getScalarVal();
  llvm::Value *NewVal = Builder.CreateIntCast(SrcVal, EltTy, false, "tmp");
  llvm::Constant *Mask =
    llvm::ConstantInt::get(llvm::APInt::getLowBitsSet(EltTySize, BitfieldSize));
  NewVal = Builder.CreateAnd(NewVal, Mask, "bf.value");

  // Return the new value of the bit-field, if requested.
  if (Result) {
    // Cast back to the proper type for result.
    const llvm::Type *SrcTy = SrcVal->getType();
    llvm::Value *SrcTrunc = Builder.CreateIntCast(NewVal, SrcTy, false,
                                                  "bf.reload.val");

    // Sign extend if necessary.
    if (Dst.isBitfieldSigned()) {
      unsigned SrcTySize = CGM.getTargetData().getTypeSizeInBits(SrcTy);
      llvm::Value *ExtraBits = llvm::ConstantInt::get(SrcTy,
                                                      SrcTySize - BitfieldSize);
      SrcTrunc = Builder.CreateAShr(Builder.CreateShl(SrcTrunc, ExtraBits),
                                    ExtraBits, "bf.reload.sext");
    }

    *Result = SrcTrunc;
  }

  // In some cases the bitfield may straddle two memory locations.
  // Emit the low part first and check to see if the high needs to be
  // done.
  unsigned LowBits = std::min(BitfieldSize, EltTySize - StartBit);
  llvm::Value *LowVal = Builder.CreateLoad(Ptr, Dst.isVolatileQualified(),
                                           "bf.prev.low");

  // Compute the mask for zero-ing the low part of this bitfield.
  llvm::Constant *InvMask =
    llvm::ConstantInt::get(~llvm::APInt::getBitsSet(EltTySize, StartBit,
                                                    StartBit + LowBits));

  // Compute the new low part as
  //   LowVal = (LowVal & InvMask) | (NewVal << StartBit),
  // with the shift of NewVal implicitly stripping the high bits.
  llvm::Value *NewLowVal =
    Builder.CreateShl(NewVal, llvm::ConstantInt::get(EltTy, StartBit),
                      "bf.value.lo");
  LowVal = Builder.CreateAnd(LowVal, InvMask, "bf.prev.lo.cleared");
  LowVal = Builder.CreateOr(LowVal, NewLowVal, "bf.new.lo");

  // Write back.
  Builder.CreateStore(LowVal, Ptr, Dst.isVolatileQualified());

  // If the low part doesn't cover the bitfield emit a high part.
  if (LowBits < BitfieldSize) {
    unsigned HighBits = BitfieldSize - LowBits;
    llvm::Value *HighPtr =
      Builder.CreateGEP(Ptr, llvm::ConstantInt::get(llvm::Type::Int32Ty, 1),
                        "bf.ptr.hi");
    llvm::Value *HighVal = Builder.CreateLoad(HighPtr,
                                              Dst.isVolatileQualified(),
                                              "bf.prev.hi");

    // Compute the mask for zero-ing the high part of this bitfield.
    llvm::Constant *InvMask =
      llvm::ConstantInt::get(~llvm::APInt::getLowBitsSet(EltTySize, HighBits));

    // Compute the new high part as
    //   HighVal = (HighVal & InvMask) | (NewVal lshr LowBits),
    // where the high bits of NewVal have already been cleared and the
    // shift strips the low bits.
    llvm::Value *NewHighVal =
      Builder.CreateLShr(NewVal, llvm::ConstantInt::get(EltTy, LowBits),
                         "bf.value.high");
    HighVal = Builder.CreateAnd(HighVal, InvMask, "bf.prev.hi.cleared");
    HighVal = Builder.CreateOr(HighVal, NewHighVal, "bf.new.hi");

    // Write back.
    Builder.CreateStore(HighVal, HighPtr, Dst.isVolatileQualified());
  }
}

void CodeGenFunction::EmitStoreThroughPropertyRefLValue(RValue Src,
                                                        LValue Dst,
                                                        QualType Ty) {
  EmitObjCPropertySet(Dst.getPropertyRefExpr(), Src);
}

void CodeGenFunction::EmitStoreThroughKVCRefLValue(RValue Src,
                                                   LValue Dst,
                                                   QualType Ty) {
  EmitObjCPropertySet(Dst.getKVCRefExpr(), Src);
}

void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
                                                               LValue Dst,
                                                               QualType Ty) {
  // This access turns into a read/modify/write of the vector. Load the input
  // value now.
  llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddr(),
                                        Dst.isVolatileQualified(), "tmp");
  const llvm::Constant *Elts = Dst.getExtVectorElts();

  llvm::Value *SrcVal = Src.getScalarVal();

  if (const VectorType *VTy = Ty->getAsVectorType()) {
    unsigned NumSrcElts = VTy->getNumElements();
    unsigned NumDstElts =
      cast<llvm::VectorType>(Vec->getType())->getNumElements();
    if (NumDstElts == NumSrcElts) {
      // Use shufflevector if the source and destination have the same number
      // of elements.
      llvm::SmallVector<llvm::Constant*, 4> Mask;
      for (unsigned i = 0; i != NumSrcElts; ++i) {
        unsigned InIdx = getAccessedFieldNo(i, Elts);
        Mask.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, InIdx));
      }

      llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
      Vec = Builder.CreateShuffleVector(SrcVal,
                                        llvm::UndefValue::get(Vec->getType()),
                                        MaskV, "tmp");
    } else if (NumDstElts > NumSrcElts) {
      // Extend the source vector to the same length and then shuffle it
      // into the destination.
      // FIXME: since we're shuffling with undef, can we just use the indices
      // into that? This could be simpler.
      llvm::SmallVector<llvm::Constant*, 4> ExtMask;
      unsigned i;
      for (i = 0; i != NumSrcElts; ++i)
        ExtMask.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, i));
      for (; i != NumDstElts; ++i)
        ExtMask.push_back(llvm::UndefValue::get(llvm::Type::Int32Ty));
      llvm::Value *ExtMaskV = llvm::ConstantVector::get(&ExtMask[0],
                                                        ExtMask.size());
      llvm::Value *ExtSrcVal =
        Builder.CreateShuffleVector(SrcVal,
                                    llvm::UndefValue::get(SrcVal->getType()),
                                    ExtMaskV, "tmp");
      // build identity
      llvm::SmallVector<llvm::Constant*, 4> Mask;
      for (unsigned i = 0; i != NumDstElts; ++i) {
        Mask.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, i));
      }
      // modify which elements get shuffled in from the extended source
      for (unsigned i = 0; i != NumSrcElts; ++i) {
        unsigned Idx = getAccessedFieldNo(i, Elts);
        Mask[Idx] = llvm::ConstantInt::get(llvm::Type::Int32Ty, i+NumDstElts);
      }
      llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
      Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV, "tmp");
    } else {
      // We should never shorten the vector.
      assert(0 && "unexpected shorten vector length");
    }
  } else {
    // If the Src is a scalar (not a vector) it must be updating one element.
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(llvm::Type::Int32Ty, InIdx);
    Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt, "tmp");
  }

  Builder.CreateStore(Vec, Dst.getExtVectorAddr(), Dst.isVolatileQualified());
}

LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
  const VarDecl *VD = dyn_cast<VarDecl>(E->getDecl());

  if (VD && (VD->isBlockVarDecl() || isa<ParmVarDecl>(VD) ||
             isa<ImplicitParamDecl>(VD))) {
    LValue LV;
    bool GCable = VD->hasLocalStorage() && !VD->hasAttr<BlocksAttr>();
    if (VD->hasExternalStorage()) {
      llvm::Value *V = CGM.GetAddrOfGlobalVar(VD);
      if (VD->getType()->isReferenceType())
        V = Builder.CreateLoad(V, "tmp");
      LV = LValue::MakeAddr(V, E->getType().getCVRQualifiers(),
                            getContext().getObjCGCAttrKind(E->getType()));
    } else {
      llvm::Value *V = LocalDeclMap[VD];
      assert(V && "DeclRefExpr not entered in LocalDeclMap?");
      // local variables do not get their gc attribute set.
      QualType::GCAttrTypes attr = QualType::GCNone;
      // local static?
      if (!GCable)
        attr = getContext().getObjCGCAttrKind(E->getType());
      if (VD->hasAttr<BlocksAttr>()) {
        bool needsCopyDispose = BlockRequiresCopying(VD->getType());
        const llvm::Type *PtrStructTy = V->getType();
        const llvm::Type *Ty = PtrStructTy;
        Ty = llvm::PointerType::get(Ty, 0);
        V = Builder.CreateStructGEP(V, 1, "forwarding");
        V = Builder.CreateBitCast(V, Ty);
        V = Builder.CreateLoad(V, false);
        V = Builder.CreateBitCast(V, PtrStructTy);
        V = Builder.CreateStructGEP(V, needsCopyDispose*2 + 4, "x");
      }
      if (VD->getType()->isReferenceType())
        V = Builder.CreateLoad(V, "tmp");
      LV = LValue::MakeAddr(V, E->getType().getCVRQualifiers(), attr);
    }
    LValue::SetObjCNonGC(LV, GCable);
    return LV;
  } else if (VD && VD->isFileVarDecl()) {
    llvm::Value *V = CGM.GetAddrOfGlobalVar(VD);
    if (VD->getType()->isReferenceType())
      V = Builder.CreateLoad(V, "tmp");
    LValue LV = LValue::MakeAddr(V, E->getType().getCVRQualifiers(),
                                 getContext().getObjCGCAttrKind(E->getType()));
    if (LV.isObjCStrong())
      LV.SetGlobalObjCRef(LV, true);
    return LV;
  } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(E->getDecl())) {
    return LValue::MakeAddr(CGM.GetAddrOfFunction(GlobalDecl(FD)),
                            E->getType().getCVRQualifiers(),
                            getContext().getObjCGCAttrKind(E->getType()));
  } else if (const ImplicitParamDecl *IPD =
               dyn_cast<ImplicitParamDecl>(E->getDecl())) {
    llvm::Value *V = LocalDeclMap[IPD];
    assert(V && "BlockVarDecl not entered in LocalDeclMap?");
    return LValue::MakeAddr(V, E->getType().getCVRQualifiers(),
                            getContext().getObjCGCAttrKind(E->getType()));
  }
  assert(0 && "Unimp declref");
  // an invalid LValue, but the assert will
  // ensure that this point is never reached.
  return LValue();
}

LValue CodeGenFunction::EmitBlockDeclRefLValue(const BlockDeclRefExpr *E) {
  return LValue::MakeAddr(GetAddrOfBlockDecl(E),
                          E->getType().getCVRQualifiers(),
                          getContext().getObjCGCAttrKind(E->getType()));
}

LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
  // __extension__ doesn't affect lvalue-ness.
  if (E->getOpcode() == UnaryOperator::Extension)
    return EmitLValue(E->getSubExpr());

  QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
  switch (E->getOpcode()) {
  default: assert(0 && "Unknown unary operator lvalue!");
  case UnaryOperator::Deref: {
    QualType T =
      E->getSubExpr()->getType()->getAsPointerType()->getPointeeType();
    LValue LV = LValue::MakeAddr(EmitScalarExpr(E->getSubExpr()),
                                 ExprTy->getAsPointerType()->getPointeeType()
                                        .getCVRQualifiers(),
                                 getContext().getObjCGCAttrKind(T));
    // We should not generate __weak write barrier on indirect reference
    // of a pointer to object; as in void foo (__weak id *param); *param = 0;
    // But, we continue to generate __strong write barrier on indirect write
    // into a pointer to object.
    if (getContext().getLangOptions().ObjC1 &&
        getContext().getLangOptions().getGCMode() != LangOptions::NonGC &&
        LV.isObjCWeak())
      LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate());
    return LV;
  }
  case UnaryOperator::Real:
  case UnaryOperator::Imag:
    LValue LV = EmitLValue(E->getSubExpr());
    unsigned Idx = E->getOpcode() == UnaryOperator::Imag;
    return LValue::MakeAddr(Builder.CreateStructGEP(LV.getAddress(),
                                                    Idx, "idx"),
                            ExprTy.getCVRQualifiers());
  }
}

LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
  return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromLiteral(E), 0);
}

LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
  return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromObjCEncode(E), 0);
}


LValue CodeGenFunction::EmitPredefinedFunctionName(unsigned Type) {
  std::string GlobalVarName;

  switch (Type) {
  default:
    assert(0 && "Invalid type");
  case PredefinedExpr::Func:
    GlobalVarName = "__func__.";
    break;
  case PredefinedExpr::Function:
    GlobalVarName = "__FUNCTION__.";
    break;
  case PredefinedExpr::PrettyFunction:
    // FIXME: Demangle C++ method names
    GlobalVarName = "__PRETTY_FUNCTION__.";
    break;
  }

  // FIXME: This isn't right at all. The logic for computing this should go
  // into a method on PredefinedExpr. This would allow sema and codegen to be
  // consistent for things like sizeof(__func__) etc.
  std::string FunctionName;
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
    FunctionName = CGM.getMangledName(FD);
  } else {
    // Just get the mangled name; skipping the asm prefix if it
    // exists.
    FunctionName = CurFn->getName();
    if (FunctionName[0] == '\01')
      FunctionName = FunctionName.substr(1, std::string::npos);
  }

  GlobalVarName += FunctionName;
  llvm::Constant *C =
    CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str());
  return LValue::MakeAddr(C, 0);
}

LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
  switch (E->getIdentType()) {
  default:
    return EmitUnsupportedLValue(E, "predefined expression");
  case PredefinedExpr::Func:
  case PredefinedExpr::Function:
  case PredefinedExpr::PrettyFunction:
    return EmitPredefinedFunctionName(E->getIdentType());
  }
}

LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
  // The index must always be an integer, which is not an aggregate. Emit it.
  llvm::Value *Idx = EmitScalarExpr(E->getIdx());

  // If the base is a vector type, then we are forming a vector element lvalue
  // with this subscript.
  if (E->getBase()->getType()->isVectorType()) {
    // Emit the vector as an lvalue to get its address.
    LValue LHS = EmitLValue(E->getBase());
    assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
    // FIXME: This should properly sign/zero/extend or truncate Idx to i32.
    return LValue::MakeVectorElt(LHS.getAddress(), Idx,
                                 E->getBase()->getType().getCVRQualifiers());
  }

  // The base must be a pointer, which is not an aggregate. Emit it.
  llvm::Value *Base = EmitScalarExpr(E->getBase());

  // Extend or truncate the index type to 32 or 64-bits.
  QualType IdxTy = E->getIdx()->getType();
  bool IdxSigned = IdxTy->isSignedIntegerType();
  unsigned IdxBitwidth = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
  if (IdxBitwidth != LLVMPointerWidth)
    Idx = Builder.CreateIntCast(Idx, llvm::IntegerType::get(LLVMPointerWidth),
                                IdxSigned, "idxprom");

  // We know that the pointer points to a type of the correct size,
  // unless the size is a VLA or Objective-C interface.
  llvm::Value *Address = 0;
  if (const VariableArrayType *VAT =
        getContext().getAsVariableArrayType(E->getType())) {
    llvm::Value *VLASize = VLASizeMap[VAT];

    Idx = Builder.CreateMul(Idx, VLASize);

    QualType BaseType = getContext().getBaseElementType(VAT);

    uint64_t BaseTypeSize = getContext().getTypeSize(BaseType) / 8;
    Idx = Builder.CreateUDiv(Idx,
                             llvm::ConstantInt::get(Idx->getType(),
                                                    BaseTypeSize));
    Address = Builder.CreateGEP(Base, Idx, "arrayidx");
  } else if (const ObjCInterfaceType *OIT =
               dyn_cast<ObjCInterfaceType>(E->getType())) {
    llvm::Value *InterfaceSize =
      llvm::ConstantInt::get(Idx->getType(),
                             getContext().getTypeSize(OIT) / 8);

    Idx = Builder.CreateMul(Idx, InterfaceSize);

    llvm::Type *i8PTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
    Address = Builder.CreateGEP(Builder.CreateBitCast(Base, i8PTy),
                                Idx, "arrayidx");
    Address = Builder.CreateBitCast(Address, Base->getType());
  } else {
    Address = Builder.CreateGEP(Base, Idx, "arrayidx");
  }

  QualType T = E->getBase()->getType()->getAsPointerType()->getPointeeType();
  LValue LV = LValue::MakeAddr(Address,
                               T.getCVRQualifiers(),
                               getContext().getObjCGCAttrKind(T));
  if (getContext().getLangOptions().ObjC1 &&
      getContext().getLangOptions().getGCMode() != LangOptions::NonGC)
    LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate());
  return LV;
}

static
llvm::Constant *GenerateConstantVector(llvm::SmallVector<unsigned, 4> &Elts) {
  llvm::SmallVector<llvm::Constant *, 4> CElts;

  for (unsigned i = 0, e = Elts.size(); i != e; ++i)
    CElts.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, Elts[i]));

  return llvm::ConstantVector::get(&CElts[0], CElts.size());
}

LValue CodeGenFunction::
EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
  // Emit the base vector as an l-value.
  LValue Base;

  // ExtVectorElementExpr's base can either be a vector or pointer to vector.
  if (!E->isArrow()) {
    assert(E->getBase()->getType()->isVectorType());
    Base = EmitLValue(E->getBase());
  } else {
    const PointerType *PT = E->getBase()->getType()->getAsPointerType();
    llvm::Value *Ptr = EmitScalarExpr(E->getBase());
    Base = LValue::MakeAddr(Ptr, PT->getPointeeType().getCVRQualifiers());
  }

  // Encode the element access list into a vector of unsigned indices.
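  // (For example, a swizzle such as "vec.yx" would encode as {1, 0}.)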
  llvm::SmallVector<unsigned, 4> Indices;
  E->getEncodedElementAccess(Indices);

  if (Base.isSimple()) {
    llvm::Constant *CV = GenerateConstantVector(Indices);
    return LValue::MakeExtVectorElt(Base.getAddress(), CV,
                                    Base.getQualifiers());
  }
  assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");

  llvm::Constant *BaseElts = Base.getExtVectorElts();
  llvm::SmallVector<llvm::Constant *, 4> CElts;

  for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
    if (isa<llvm::ConstantAggregateZero>(BaseElts))
      CElts.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, 0));
    else
      CElts.push_back(BaseElts->getOperand(Indices[i]));
  }
  llvm::Constant *CV = llvm::ConstantVector::get(&CElts[0], CElts.size());
  return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV,
                                  Base.getQualifiers());
}

LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
  bool isUnion = false;
  bool isIvar = false;
  bool isNonGC = false;
  Expr *BaseExpr = E->getBase();
  llvm::Value *BaseValue = NULL;
  unsigned CVRQualifiers = 0;

  // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
  if (E->isArrow()) {
    BaseValue = EmitScalarExpr(BaseExpr);
    const PointerType *PTy =
      BaseExpr->getType()->getAsPointerType();
    if (PTy->getPointeeType()->isUnionType())
      isUnion = true;
    CVRQualifiers = PTy->getPointeeType().getCVRQualifiers();
  } else if (isa<ObjCPropertyRefExpr>(BaseExpr) ||
             isa<ObjCKVCRefExpr>(BaseExpr)) {
    RValue RV = EmitObjCPropertyGet(BaseExpr);
    BaseValue = RV.getAggregateAddr();
    if (BaseExpr->getType()->isUnionType())
      isUnion = true;
    CVRQualifiers = BaseExpr->getType().getCVRQualifiers();
  } else {
    LValue BaseLV = EmitLValue(BaseExpr);
    if (BaseLV.isObjCIvar())
      isIvar = true;
    if (BaseLV.isNonGC())
      isNonGC = true;
    // FIXME: this isn't right for bitfields.
    BaseValue = BaseLV.getAddress();
    if (BaseExpr->getType()->isUnionType())
      isUnion = true;
    CVRQualifiers = BaseExpr->getType().getCVRQualifiers();
  }

  FieldDecl *Field = dyn_cast<FieldDecl>(E->getMemberDecl());
  // FIXME: Handle non-field member expressions
  assert(Field && "No code generation for non-field member references");
  LValue MemExpLV = EmitLValueForField(BaseValue, Field, isUnion,
                                       CVRQualifiers);
  LValue::SetObjCIvar(MemExpLV, isIvar);
  LValue::SetObjCNonGC(MemExpLV, isNonGC);
  return MemExpLV;
}

LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value* BaseValue,
                                              FieldDecl* Field,
                                              unsigned CVRQualifiers) {
  unsigned idx = CGM.getTypes().getLLVMFieldNo(Field);
  // FIXME: CodeGenTypes should expose a method to get the appropriate type for
  // FieldTy (the appropriate type is ABI-dependent).
  const llvm::Type *FieldTy =
    CGM.getTypes().ConvertTypeForMem(Field->getType());
  const llvm::PointerType *BaseTy =
    cast<llvm::PointerType>(BaseValue->getType());
  unsigned AS = BaseTy->getAddressSpace();
  BaseValue = Builder.CreateBitCast(BaseValue,
                                    llvm::PointerType::get(FieldTy, AS),
                                    "tmp");
  llvm::Value *V =
    Builder.CreateGEP(BaseValue,
                      llvm::ConstantInt::get(llvm::Type::Int32Ty, idx),
                      "tmp");

  CodeGenTypes::BitFieldInfo bitFieldInfo =
    CGM.getTypes().getBitFieldInfo(Field);
  return LValue::MakeBitfield(V, bitFieldInfo.Begin, bitFieldInfo.Size,
                              Field->getType()->isSignedIntegerType(),
                              Field->getType().getCVRQualifiers()|CVRQualifiers);
}

LValue CodeGenFunction::EmitLValueForField(llvm::Value* BaseValue,
                                           FieldDecl* Field,
                                           bool isUnion,
                                           unsigned CVRQualifiers) {
  if (Field->isBitField())
    return EmitLValueForBitfield(BaseValue, Field, CVRQualifiers);

  unsigned idx = CGM.getTypes().getLLVMFieldNo(Field);
  llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx, "tmp");

  // Match union field type.
  if (isUnion) {
    const llvm::Type *FieldTy =
      CGM.getTypes().ConvertTypeForMem(Field->getType());
    const llvm::PointerType *BaseTy =
      cast<llvm::PointerType>(BaseValue->getType());
    unsigned AS = BaseTy->getAddressSpace();
    V = Builder.CreateBitCast(V,
                              llvm::PointerType::get(FieldTy, AS),
                              "tmp");
  }

  QualType::GCAttrTypes attr = QualType::GCNone;
  if (CGM.getLangOptions().ObjC1 &&
      CGM.getLangOptions().getGCMode() != LangOptions::NonGC) {
    QualType Ty = Field->getType();
    attr = Ty.getObjCGCAttr();
    if (attr != QualType::GCNone) {
      // __weak attribute on a field is ignored.
      if (attr == QualType::Weak)
        attr = QualType::GCNone;
    } else if (getContext().isObjCObjectPointerType(Ty))
      attr = QualType::Strong;
  }
  LValue LV =
    LValue::MakeAddr(V,
                     Field->getType().getCVRQualifiers()|CVRQualifiers,
                     attr);
  return LV;
}

LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr* E){
  const llvm::Type *LTy = ConvertType(E->getType());
  llvm::Value *DeclPtr = CreateTempAlloca(LTy, ".compoundliteral");

  const Expr* InitExpr = E->getInitializer();
  LValue Result = LValue::MakeAddr(DeclPtr, E->getType().getCVRQualifiers());

  if (E->getType()->isComplexType()) {
    EmitComplexExprIntoAddr(InitExpr, DeclPtr, false);
  } else if (hasAggregateLLVMType(E->getType())) {
    EmitAnyExpr(InitExpr, DeclPtr, false);
  } else {
    EmitStoreThroughLValue(EmitAnyExpr(InitExpr), Result, E->getType());
  }

  return Result;
}

LValue CodeGenFunction::EmitConditionalOperator(const ConditionalOperator* E) {
  // We don't handle vectors yet.
  if (E->getType()->isVectorType())
    return EmitUnsupportedLValue(E, "conditional operator");

  // ?: here should be an aggregate.
  assert((hasAggregateLLVMType(E->getType()) &&
          !E->getType()->isAnyComplexType()) &&
         "Unexpected conditional operator!");

  llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
  EmitAggExpr(E, Temp, false);

  return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers(),
                          getContext().getObjCGCAttrKind(E->getType()));
}

/// EmitCastLValue - Casts are never lvalues. If a cast is needed by the code
/// generator in an lvalue context, then it must mean that we need the address
/// of an aggregate in order to access one of its fields. This can happen for
/// all the reasons that casts are permitted with aggregate result, including
/// noop aggregate casts, and cast from scalar to union.
LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
  // If this is an aggregate-to-aggregate cast, just use the input's address as
  // the lvalue.
  if (getContext().hasSameUnqualifiedType(E->getType(),
                                          E->getSubExpr()->getType()))
    return EmitLValue(E->getSubExpr());

  // Otherwise, we must have a cast from scalar to union.
  assert(E->getType()->isUnionType() && "Expected scalar-to-union cast");

  // Casts are only lvalues when the source and destination types are the same.
  llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
  EmitAnyExpr(E->getSubExpr(), Temp, false);

  return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers(),
                          getContext().getObjCGCAttrKind(E->getType()));
}

//===--------------------------------------------------------------------===//
//                             Expression Emission
//===--------------------------------------------------------------------===//


RValue CodeGenFunction::EmitCallExpr(const CallExpr *E) {
  // Builtins never have block type.
  if (E->getCallee()->getType()->isBlockPointerType())
    return EmitBlockCallExpr(E);

  if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E))
    return EmitCXXMemberCallExpr(CE);

  const Decl *TargetDecl = 0;
  if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E->getCallee())) {
    if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) {
      TargetDecl = DRE->getDecl();
      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(TargetDecl))
        if (unsigned builtinID = FD->getBuiltinID(getContext()))
          return EmitBuiltinExpr(FD, builtinID, E);
    }
  }

  if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E)) {
    if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
      return EmitCXXOperatorMemberCallExpr(CE, MD);
  }

  llvm::Value *Callee = EmitScalarExpr(E->getCallee());
  return EmitCall(Callee, E->getCallee()->getType(),
                  E->arg_begin(), E->arg_end(), TargetDecl);
}

LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
  // Comma expressions just emit their LHS then their RHS as an l-value.
  if (E->getOpcode() == BinaryOperator::Comma) {
    EmitAnyExpr(E->getLHS());
    return EmitLValue(E->getRHS());
  }

  // Can only get l-value for binary operator expressions which are a
  // simple assignment of aggregate type.
  if (E->getOpcode() != BinaryOperator::Assign)
    return EmitUnsupportedLValue(E, "binary l-value expression");

  llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
  EmitAggExpr(E, Temp, false);
  // FIXME: Are these qualifiers correct?
  return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers(),
                          getContext().getObjCGCAttrKind(E->getType()));
}

LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
  RValue RV = EmitCallExpr(E);

  if (RV.isScalar()) {
    assert(E->getCallReturnType()->isReferenceType() &&
           "Can't have a scalar return unless the return type is a "
           "reference type!");

    return LValue::MakeAddr(RV.getScalarVal(), E->getType().getCVRQualifiers(),
                            getContext().getObjCGCAttrKind(E->getType()));
  }

  return LValue::MakeAddr(RV.getAggregateAddr(),
                          E->getType().getCVRQualifiers(),
                          getContext().getObjCGCAttrKind(E->getType()));
}

LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
  // FIXME: This shouldn't require another copy.
  llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
  EmitAggExpr(E, Temp, false);
  return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers());
}

LValue
CodeGenFunction::EmitCXXConditionDeclLValue(const CXXConditionDeclExpr *E) {
  EmitLocalBlockVarDecl(*E->getVarDecl());
  return EmitDeclRefLValue(E);
}

LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
  // Can only get l-value for message expression returning aggregate type
  RValue RV = EmitObjCMessageExpr(E);
  // FIXME: can this be volatile?
  return LValue::MakeAddr(RV.getAggregateAddr(),
                          E->getType().getCVRQualifiers(),
                          getContext().getObjCGCAttrKind(E->getType()));
}

llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
                                             const ObjCIvarDecl *Ivar) {
  return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
}

LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
                                          llvm::Value *BaseValue,
                                          const ObjCIvarDecl *Ivar,
                                          unsigned CVRQualifiers) {
  return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
                                                   Ivar, CVRQualifiers);
}

LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
  // FIXME: A lot of the code below could be shared with EmitMemberExpr.
  llvm::Value *BaseValue = 0;
  const Expr *BaseExpr = E->getBase();
  unsigned CVRQualifiers = 0;
  QualType ObjectTy;
  if (E->isArrow()) {
    BaseValue = EmitScalarExpr(BaseExpr);
    const PointerType *PTy = BaseExpr->getType()->getAsPointerType();
    ObjectTy = PTy->getPointeeType();
    CVRQualifiers = ObjectTy.getCVRQualifiers();
  } else {
    LValue BaseLV = EmitLValue(BaseExpr);
    // FIXME: this isn't right for bitfields.
    BaseValue = BaseLV.getAddress();
    ObjectTy = BaseExpr->getType();
    CVRQualifiers = ObjectTy.getCVRQualifiers();
  }

  return EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(), CVRQualifiers);
}

LValue
CodeGenFunction::EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E) {
  // This is a special l-value that just issues sends when we load or
  // store through it.
  return LValue::MakePropertyRef(E, E->getType().getCVRQualifiers());
}

LValue
CodeGenFunction::EmitObjCKVCRefLValue(const ObjCKVCRefExpr *E) {
  // This is a special l-value that just issues sends when we load or
  // store through it.
  return LValue::MakeKVCRef(E, E->getType().getCVRQualifiers());
}

LValue
CodeGenFunction::EmitObjCSuperExprLValue(const ObjCSuperExpr *E) {
  return EmitUnsupportedLValue(E, "use of super");
}

LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {

  // Can only get l-value for statement expression returning aggregate type
  RValue RV = EmitAnyExprToTemp(E);
  // FIXME: can this be volatile?
  return LValue::MakeAddr(RV.getAggregateAddr(),
                          E->getType().getCVRQualifiers(),
                          getContext().getObjCGCAttrKind(E->getType()));
}


RValue CodeGenFunction::EmitCall(llvm::Value *Callee, QualType CalleeType,
                                 CallExpr::const_arg_iterator ArgBeg,
                                 CallExpr::const_arg_iterator ArgEnd,
                                 const Decl *TargetDecl) {
  // Get the actual function type. The callee type will always be a
  // pointer to function type or a block pointer type.
  assert(CalleeType->isFunctionPointerType() &&
         "Call must have function pointer type!");

  QualType FnType = CalleeType->getAsPointerType()->getPointeeType();
  QualType ResultType = FnType->getAsFunctionType()->getResultType();

  CallArgList Args;
  EmitCallArgs(Args, FnType->getAsFunctionProtoType(), ArgBeg, ArgEnd);

  return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args),
                  Callee, Args, TargetDecl);
}