CGExpr.cpp revision a1cf15f4680e5cf39e72e28c5ea854fcba792e84
//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGCall.h"
#include "CGObjCRuntime.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(const llvm::Type *Ty,
                                                    const char *Name) {
  if (!Builder.isNamePreserving())
    Name = "";
  return new llvm::AllocaInst(VMContext, Ty, 0, Name, AllocaInsertPt);
}

/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  QualType BoolTy = getContext().BoolTy;
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy);
}

/// EmitAnyExpr - Emit code to compute the specified expression which can have
/// any type. The result is returned as an RValue struct. If this is an
/// aggregate expression, the aggloc/agglocvolatile arguments indicate where
/// the result should be returned.
RValue CodeGenFunction::EmitAnyExpr(const Expr *E, llvm::Value *AggLoc,
                                    bool isAggLocVolatile, bool IgnoreResult) {
  if (!hasAggregateLLVMType(E->getType()))
    return RValue::get(EmitScalarExpr(E, IgnoreResult));
  else if (E->getType()->isAnyComplexType())
    return RValue::getComplex(EmitComplexExpr(E, false, false,
                                              IgnoreResult, IgnoreResult));

  EmitAggExpr(E, AggLoc, isAggLocVolatile, IgnoreResult);
  return RValue::getAggregate(AggLoc, isAggLocVolatile);
}

/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E, llvm::Value *AggLoc,
                                          bool isAggLocVolatile) {
  if (!AggLoc && hasAggregateLLVMType(E->getType()) &&
      !E->getType()->isAnyComplexType())
    AggLoc = CreateTempAlloca(ConvertType(E->getType()), "agg.tmp");
  return EmitAnyExpr(E, AggLoc, isAggLocVolatile);
}

RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
                                                   QualType DestType) {
  RValue Val;
  if (E->isLvalue(getContext()) == Expr::LV_Valid) {
    // Emit the expr as an lvalue.
    LValue LV = EmitLValue(E);
    if (LV.isSimple())
      return RValue::get(LV.getAddress());
    Val = EmitLoadOfLValue(LV, E->getType());
  } else {
    Val = EmitAnyExprToTemp(E);
  }

  if (Val.isAggregate()) {
    Val = RValue::get(Val.getAggregateAddr());
  } else {
    // Create a temporary variable that we can bind the reference to.
    llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(E->getType()),
                                         "reftmp");
    if (Val.isScalar())
      EmitStoreOfScalar(Val.getScalarVal(), Temp, false, E->getType());
    else
      StoreComplexToAddr(Val.getComplexVal(), Temp, false);
    Val = RValue::get(Temp);
  }

  return Val;
}


/// getAccessedFieldNo - Given an encoded value and a result number, return
/// the input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  if (isa<llvm::ConstantAggregateZero>(Elts))
    return 0;

  return cast<llvm::ConstantInt>(Elts->getOperand(Idx))->getZExtValue();
}


//===----------------------------------------------------------------------===//
//                         LValue Expression Emission
//===----------------------------------------------------------------------===//

RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType()) {
    return RValue::get(0);
  } else if (const ComplexType *CTy = Ty->getAsComplexType()) {
    const llvm::Type *EltTy = ConvertType(CTy->getElementType());
    llvm::Value *U = VMContext.getUndef(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  } else if (hasAggregateLLVMType(Ty)) {
    const llvm::Type *LTy = VMContext.getPointerTypeUnqual(ConvertType(Ty));
    return RValue::getAggregate(VMContext.getUndef(LTy));
  } else {
    return RValue::get(VMContext.getUndef(ConvertType(Ty)));
  }
}

RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}

LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *Ty = VMContext.getPointerTypeUnqual(ConvertType(E->getType()));
  return LValue::MakeAddr(VMContext.getUndef(Ty),
                          E->getType().getCVRQualifiers(),
                          getContext().getObjCGCAttrKind(E->getType()));
}

/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield
/// reference.  In either case, the LLVM Value* in the LValue structure is
/// guaranteed to be an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of
/// the LLVM value is known: For example, it may not be a pointer to an
/// integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed
/// size, this method guarantees that the returned pointer type will point to
/// an LLVM type of the same size as the lvalue's type.  If the lvalue has a
/// variable length type, this is not possible.
///
LValue CodeGenFunction::EmitLValue(const Expr *E) {
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CallExprClass:
  case Expr::CXXOperatorCallExprClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
  case Expr::QualifiedDeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ParenExprClass:
    return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));

  case Expr::BlockDeclRefExprClass:
    return EmitBlockDeclRefLValue(cast<BlockDeclRefExpr>(E));

  case Expr::CXXConditionDeclExprClass:
    return EmitCXXConditionDeclLValue(cast<CXXConditionDeclExpr>(E));
  case Expr::CXXTemporaryObjectExprClass:
  case Expr::CXXConstructExprClass:
    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
  case Expr::CXXBindTemporaryExprClass:
    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));

  case Expr::ObjCMessageExprClass:
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
  case Expr::ObjCIvarRefExprClass:
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
  case Expr::ObjCPropertyRefExprClass:
    return EmitObjCPropertyRefLValue(cast<ObjCPropertyRefExpr>(E));
  case Expr::ObjCKVCRefExprClass:
    return EmitObjCKVCRefLValue(cast<ObjCKVCRefExpr>(E));
  case Expr::ObjCSuperExprClass:
    return EmitObjCSuperExprLValue(cast<ObjCSuperExpr>(E));

  case Expr::StmtExprClass:
    return EmitStmtExprLValue(cast<StmtExpr>(E));
  case Expr::UnaryOperatorClass:
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
  case Expr::ArraySubscriptExprClass:
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
  case Expr::ExtVectorElementExprClass:
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
  case Expr::MemberExprClass:
    return EmitMemberExpr(cast<MemberExpr>(E));
  case Expr::CompoundLiteralExprClass:
    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperator(cast<ConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));
  }
}

llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
                                               QualType Ty) {
  llvm::Value *V = Builder.CreateLoad(Addr, Volatile, "tmp");

  // Bool can have different representation in memory than in registers.
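  // (For example, a bool is typically stored as an i8 in memory but used as an
  // i1 in registers, so the loaded value may need to be truncated.)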
  if (Ty->isBooleanType())
    if (V->getType() != llvm::Type::Int1Ty)
      V = Builder.CreateTrunc(V, llvm::Type::Int1Ty, "tobool");

  return V;
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
                                        bool Volatile, QualType Ty) {

  if (Ty->isBooleanType()) {
    // Bool can have different representation in memory than in registers.
    const llvm::Type *SrcTy = Value->getType();
    const llvm::PointerType *DstPtr = cast<llvm::PointerType>(Addr->getType());
    if (DstPtr->getElementType() != SrcTy) {
      const llvm::Type *MemTy =
        VMContext.getPointerType(SrcTy, DstPtr->getAddressSpace());
      Addr = Builder.CreateBitCast(Addr, MemTy, "storetmp");
    }
  }
  Builder.CreateStore(Value, Addr, Volatile);
}

/// EmitLoadOfLValue - Given an expression that represents a value lvalue,
/// this method emits the address of the lvalue, then loads the result as an
/// rvalue, returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, QualType ExprType) {
  if (LV.isObjCWeak()) {
    // Load of a __weak object.
    llvm::Value *AddrWeakObj = LV.getAddress();
    llvm::Value *read_weak = CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                                   AddrWeakObj);
    return RValue::get(read_weak);
  }

  if (LV.isSimple()) {
    llvm::Value *Ptr = LV.getAddress();
    const llvm::Type *EltTy =
      cast<llvm::PointerType>(Ptr->getType())->getElementType();

    // Simple scalar l-value.
    if (EltTy->isSingleValueType())
      return RValue::get(EmitLoadOfScalar(Ptr, LV.isVolatileQualified(),
                                          ExprType));

    assert(ExprType->isFunctionType() && "Unknown scalar value");
    return RValue::get(Ptr);
  }

  if (LV.isVectorElt()) {
    llvm::Value *Vec = Builder.CreateLoad(LV.getVectorAddr(),
                                          LV.isVolatileQualified(), "tmp");
    return RValue::get(Builder.CreateExtractElement(Vec, LV.getVectorIdx(),
                                                    "vecext"));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
  if (LV.isExtVectorElt())
    return EmitLoadOfExtVectorElementLValue(LV, ExprType);

  if (LV.isBitfield())
    return EmitLoadOfBitfieldLValue(LV, ExprType);

  if (LV.isPropertyRef())
    return EmitLoadOfPropertyRefLValue(LV, ExprType);

  assert(LV.isKVCRef() && "Unknown LValue type!");
  return EmitLoadOfKVCRefLValue(LV, ExprType);
}

RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
                                                 QualType ExprType) {
  unsigned StartBit = LV.getBitfieldStartBit();
  unsigned BitfieldSize = LV.getBitfieldSize();
  llvm::Value *Ptr = LV.getBitfieldAddr();

  const llvm::Type *EltTy =
    cast<llvm::PointerType>(Ptr->getType())->getElementType();
  unsigned EltTySize = CGM.getTargetData().getTypeSizeInBits(EltTy);

  // In some cases the bitfield may straddle two memory locations.
  // Currently we load the entire bitfield, then do the magic to
  // sign-extend it if necessary. This results in somewhat more code
  // than necessary for the common case (one load), since two shifts
  // accomplish both the masking and sign extension.
  unsigned LowBits = std::min(BitfieldSize, EltTySize - StartBit);
  llvm::Value *Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "tmp");

  // Shift to proper location.
  if (StartBit)
    Val = Builder.CreateLShr(Val, VMContext.getConstantInt(EltTy, StartBit),
                             "bf.lo");

  // Mask off unused bits.
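  // For example, if LowBits is 8, this keeps only the low 8 bits of the loaded
  // storage unit (mask 0xFF).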
  llvm::Constant *LowMask =
    VMContext.getConstantInt(llvm::APInt::getLowBitsSet(EltTySize, LowBits));
  Val = Builder.CreateAnd(Val, LowMask, "bf.lo.cleared");

  // Fetch the high bits if necessary.
  if (LowBits < BitfieldSize) {
    unsigned HighBits = BitfieldSize - LowBits;
    llvm::Value *HighPtr =
      Builder.CreateGEP(Ptr, VMContext.getConstantInt(llvm::Type::Int32Ty, 1),
                        "bf.ptr.hi");
    llvm::Value *HighVal = Builder.CreateLoad(HighPtr,
                                              LV.isVolatileQualified(),
                                              "tmp");

    // Mask off unused bits.
    llvm::Constant *HighMask =
      VMContext.getConstantInt(llvm::APInt::getLowBitsSet(EltTySize, HighBits));
    HighVal = Builder.CreateAnd(HighVal, HighMask, "bf.lo.cleared");

    // Shift to proper location and OR into the bitfield value.
    HighVal = Builder.CreateShl(HighVal,
                                VMContext.getConstantInt(EltTy, LowBits));
    Val = Builder.CreateOr(Val, HighVal, "bf.val");
  }

  // Sign extend if necessary.
  if (LV.isBitfieldSigned()) {
    llvm::Value *ExtraBits = VMContext.getConstantInt(EltTy,
                                                      EltTySize - BitfieldSize);
    Val = Builder.CreateAShr(Builder.CreateShl(Val, ExtraBits),
                             ExtraBits, "bf.val.sext");
  }

  // The bitfield type and the normal type differ when the storage sizes
  // differ (currently just _Bool).
  Val = Builder.CreateIntCast(Val, ConvertType(ExprType), false, "tmp");

  return RValue::get(Val);
}

RValue CodeGenFunction::EmitLoadOfPropertyRefLValue(LValue LV,
                                                    QualType ExprType) {
  return EmitObjCPropertyGet(LV.getPropertyRefExpr());
}

RValue CodeGenFunction::EmitLoadOfKVCRefLValue(LValue LV,
                                               QualType ExprType) {
  return EmitObjCPropertyGet(LV.getKVCRefExpr());
}

// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
                                                         QualType ExprType) {
  llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddr(),
                                        LV.isVolatileQualified(), "tmp");

  const llvm::Constant *Elts = LV.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be
  // extracting a single element.  Just codegen as an extractelement.
  const VectorType *ExprVT = ExprType->getAsVectorType();
  if (!ExprVT) {
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = VMContext.getConstantInt(llvm::Type::Int32Ty, InIdx);
    return RValue::get(Builder.CreateExtractElement(Vec, Elt, "tmp"));
  }

  // Always use shuffle vector to try to retain the original program structure.
  unsigned NumResultElts = ExprVT->getNumElements();

  llvm::SmallVector<llvm::Constant*, 4> Mask;
  for (unsigned i = 0; i != NumResultElts; ++i) {
    unsigned InIdx = getAccessedFieldNo(i, Elts);
    Mask.push_back(VMContext.getConstantInt(llvm::Type::Int32Ty, InIdx));
  }

  llvm::Value *MaskV = VMContext.getConstantVector(&Mask[0], Mask.size());
  Vec = Builder.CreateShuffleVector(Vec,
                                    VMContext.getUndef(Vec->getType()),
                                    MaskV, "tmp");
  return RValue::get(Vec);
}



/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
                                             QualType Ty) {
  if (!Dst.isSimple()) {
    if (Dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element.
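      // For example, 'v[1] = x' loads v, inserts x at index 1, and stores the
      // whole vector back.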
      llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddr(),
                                            Dst.isVolatileQualified(), "tmp");
      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
                                        Dst.getVectorIdx(), "vecins");
      Builder.CreateStore(Vec, Dst.getVectorAddr(), Dst.isVolatileQualified());
      return;
    }

    // If this is an update of extended vector elements, insert them as
    // appropriate.
    if (Dst.isExtVectorElt())
      return EmitStoreThroughExtVectorComponentLValue(Src, Dst, Ty);

    if (Dst.isBitfield())
      return EmitStoreThroughBitfieldLValue(Src, Dst, Ty);

    if (Dst.isPropertyRef())
      return EmitStoreThroughPropertyRefLValue(Src, Dst, Ty);

    if (Dst.isKVCRef())
      return EmitStoreThroughKVCRefLValue(Src, Dst, Ty);

    assert(0 && "Unknown LValue type");
  }

  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // Store into a __weak object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
    return;
  }

  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // Store into a __strong object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
#if 0
    // FIXME. We cannot positively determine if we have an 'ivar' assignment,
    // object assignment or an unknown assignment. For now, generate a call to
    // the objc_assign_strongCast assignment, which is a safe, but conservative
    // assumption.
    if (Dst.isObjCIvar())
      CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, LvalueDst);
    else
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst);
#endif
    if (Dst.isGlobalObjCRef())
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst);
    else
      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
    return;
  }

  assert(Src.isScalar() && "Can't emit an agg store with this method");
  EmitStoreOfScalar(Src.getScalarVal(), Dst.getAddress(),
                    Dst.isVolatileQualified(), Ty);
}

void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                                     QualType Ty,
                                                     llvm::Value **Result) {
  unsigned StartBit = Dst.getBitfieldStartBit();
  unsigned BitfieldSize = Dst.getBitfieldSize();
  llvm::Value *Ptr = Dst.getBitfieldAddr();

  const llvm::Type *EltTy =
    cast<llvm::PointerType>(Ptr->getType())->getElementType();
  unsigned EltTySize = CGM.getTargetData().getTypeSizeInBits(EltTy);

  // Get the new value, cast to the appropriate type and masked to
  // exactly the size of the bit-field.
  llvm::Value *SrcVal = Src.getScalarVal();
  llvm::Value *NewVal = Builder.CreateIntCast(SrcVal, EltTy, false, "tmp");
  llvm::Constant *Mask =
    VMContext.getConstantInt(llvm::APInt::getLowBitsSet(EltTySize, BitfieldSize));
  NewVal = Builder.CreateAnd(NewVal, Mask, "bf.value");

  // Return the new value of the bit-field, if requested.
  if (Result) {
    // Cast back to the proper type for the result.
    const llvm::Type *SrcTy = SrcVal->getType();
    llvm::Value *SrcTrunc = Builder.CreateIntCast(NewVal, SrcTy, false,
                                                  "bf.reload.val");

    // Sign extend if necessary.
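    // A shift left followed by an arithmetic shift right by the same amount
    // replicates the field's sign bit through the unused high bits.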
    if (Dst.isBitfieldSigned()) {
      unsigned SrcTySize = CGM.getTargetData().getTypeSizeInBits(SrcTy);
      llvm::Value *ExtraBits = VMContext.getConstantInt(SrcTy,
                                                        SrcTySize - BitfieldSize);
      SrcTrunc = Builder.CreateAShr(Builder.CreateShl(SrcTrunc, ExtraBits),
                                    ExtraBits, "bf.reload.sext");
    }

    *Result = SrcTrunc;
  }

  // In some cases the bitfield may straddle two memory locations.
  // Emit the low part first and check to see if the high part needs to be
  // done.
  unsigned LowBits = std::min(BitfieldSize, EltTySize - StartBit);
  llvm::Value *LowVal = Builder.CreateLoad(Ptr, Dst.isVolatileQualified(),
                                           "bf.prev.low");

  // Compute the mask for zeroing the low part of this bitfield.
  llvm::Constant *InvMask =
    VMContext.getConstantInt(~llvm::APInt::getBitsSet(EltTySize, StartBit,
                                                      StartBit + LowBits));

  // Compute the new low part as
  //   LowVal = (LowVal & InvMask) | (NewVal << StartBit),
  // with the shift of NewVal implicitly stripping the high bits.
  llvm::Value *NewLowVal =
    Builder.CreateShl(NewVal, VMContext.getConstantInt(EltTy, StartBit),
                      "bf.value.lo");
  LowVal = Builder.CreateAnd(LowVal, InvMask, "bf.prev.lo.cleared");
  LowVal = Builder.CreateOr(LowVal, NewLowVal, "bf.new.lo");

  // Write back.
  Builder.CreateStore(LowVal, Ptr, Dst.isVolatileQualified());

  // If the low part doesn't cover the bitfield, emit a high part.
  if (LowBits < BitfieldSize) {
    unsigned HighBits = BitfieldSize - LowBits;
    llvm::Value *HighPtr =
      Builder.CreateGEP(Ptr, VMContext.getConstantInt(llvm::Type::Int32Ty, 1),
                        "bf.ptr.hi");
    llvm::Value *HighVal = Builder.CreateLoad(HighPtr,
                                              Dst.isVolatileQualified(),
                                              "bf.prev.hi");

    // Compute the mask for zeroing the high part of this bitfield.
    llvm::Constant *InvMask =
      VMContext.getConstantInt(~llvm::APInt::getLowBitsSet(EltTySize,
                                                           HighBits));

    // Compute the new high part as
    //   HighVal = (HighVal & InvMask) | (NewVal lshr LowBits),
    // where the high bits of NewVal have already been cleared and the
    // shift strips the low bits.
    llvm::Value *NewHighVal =
      Builder.CreateLShr(NewVal, VMContext.getConstantInt(EltTy, LowBits),
                         "bf.value.high");
    HighVal = Builder.CreateAnd(HighVal, InvMask, "bf.prev.hi.cleared");
    HighVal = Builder.CreateOr(HighVal, NewHighVal, "bf.new.hi");

    // Write back.
    Builder.CreateStore(HighVal, HighPtr, Dst.isVolatileQualified());
  }
}

void CodeGenFunction::EmitStoreThroughPropertyRefLValue(RValue Src,
                                                        LValue Dst,
                                                        QualType Ty) {
  EmitObjCPropertySet(Dst.getPropertyRefExpr(), Src);
}

void CodeGenFunction::EmitStoreThroughKVCRefLValue(RValue Src,
                                                   LValue Dst,
                                                   QualType Ty) {
  EmitObjCPropertySet(Dst.getKVCRefExpr(), Src);
}

void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
                                                               LValue Dst,
                                                               QualType Ty) {
  // This access turns into a read/modify/write of the vector.  Load the input
  // value now.
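  // For example, 'v.xy = w' rewrites elements 0 and 1 of v with w's elements
  // and then stores the whole vector back.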
  llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddr(),
                                        Dst.isVolatileQualified(), "tmp");
  const llvm::Constant *Elts = Dst.getExtVectorElts();

  llvm::Value *SrcVal = Src.getScalarVal();

  if (const VectorType *VTy = Ty->getAsVectorType()) {
    unsigned NumSrcElts = VTy->getNumElements();
    unsigned NumDstElts =
      cast<llvm::VectorType>(Vec->getType())->getNumElements();
    if (NumDstElts == NumSrcElts) {
      // Use shufflevector if the source and destination have the same number
      // of elements, building the mask from the positions the elements will
      // be stored to.
      llvm::SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
      for (unsigned i = 0; i != NumSrcElts; ++i) {
        unsigned InIdx = getAccessedFieldNo(i, Elts);
        Mask[InIdx] = VMContext.getConstantInt(llvm::Type::Int32Ty, i);
      }

      llvm::Value *MaskV = VMContext.getConstantVector(&Mask[0], Mask.size());
      Vec = Builder.CreateShuffleVector(SrcVal,
                                        VMContext.getUndef(Vec->getType()),
                                        MaskV, "tmp");
    } else if (NumDstElts > NumSrcElts) {
      // Extend the source vector to the same length and then shuffle it
      // into the destination.
      // FIXME: since we're shuffling with undef, can we just use the indices
      // into that?  This could be simpler.
      llvm::SmallVector<llvm::Constant*, 4> ExtMask;
      unsigned i;
      for (i = 0; i != NumSrcElts; ++i)
        ExtMask.push_back(VMContext.getConstantInt(llvm::Type::Int32Ty, i));
      for (; i != NumDstElts; ++i)
        ExtMask.push_back(VMContext.getUndef(llvm::Type::Int32Ty));
      llvm::Value *ExtMaskV = VMContext.getConstantVector(&ExtMask[0],
                                                          ExtMask.size());
      llvm::Value *ExtSrcVal =
        Builder.CreateShuffleVector(SrcVal,
                                    VMContext.getUndef(SrcVal->getType()),
                                    ExtMaskV, "tmp");
      // Build the identity mask.
      llvm::SmallVector<llvm::Constant*, 4> Mask;
      for (unsigned i = 0; i != NumDstElts; ++i) {
        Mask.push_back(VMContext.getConstantInt(llvm::Type::Int32Ty, i));
      }
      // Modify the entries that get shuffled in from the source.
      for (unsigned i = 0; i != NumSrcElts; ++i) {
        unsigned Idx = getAccessedFieldNo(i, Elts);
        Mask[Idx] = VMContext.getConstantInt(llvm::Type::Int32Ty, i+NumDstElts);
      }
      llvm::Value *MaskV = VMContext.getConstantVector(&Mask[0], Mask.size());
      Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV, "tmp");
    } else {
      // We should never shorten the vector.
      assert(0 && "unexpected shorten vector length");
    }
  } else {
    // If the Src is a scalar (not a vector) it must be updating one element.
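    // For example, 'v.z = 1.0f' becomes a single insertelement at index 2.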
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = VMContext.getConstantInt(llvm::Type::Int32Ty, InIdx);
    Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt, "tmp");
  }

  Builder.CreateStore(Vec, Dst.getExtVectorAddr(), Dst.isVolatileQualified());
}

LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
  const VarDecl *VD = dyn_cast<VarDecl>(E->getDecl());

  if (VD && (VD->isBlockVarDecl() || isa<ParmVarDecl>(VD) ||
             isa<ImplicitParamDecl>(VD))) {
    LValue LV;
    bool NonGCable = VD->hasLocalStorage() &&
                     !VD->hasAttr<BlocksAttr>();
    if (VD->hasExternalStorage()) {
      llvm::Value *V = CGM.GetAddrOfGlobalVar(VD);
      if (VD->getType()->isReferenceType())
        V = Builder.CreateLoad(V, "tmp");
      LV = LValue::MakeAddr(V, E->getType().getCVRQualifiers(),
                            getContext().getObjCGCAttrKind(E->getType()));
    } else {
      llvm::Value *V = LocalDeclMap[VD];
      assert(V && "DeclRefExpr not entered in LocalDeclMap?");
      // Local variables do not get their gc attribute set.
      QualType::GCAttrTypes attr = QualType::GCNone;
      // Local static?
      if (!NonGCable)
        attr = getContext().getObjCGCAttrKind(E->getType());
      if (VD->hasAttr<BlocksAttr>()) {
        bool needsCopyDispose = BlockRequiresCopying(VD->getType());
        const llvm::Type *PtrStructTy = V->getType();
        const llvm::Type *Ty = PtrStructTy;
        Ty = VMContext.getPointerType(Ty, 0);
        V = Builder.CreateStructGEP(V, 1, "forwarding");
        V = Builder.CreateBitCast(V, Ty);
        V = Builder.CreateLoad(V, false);
        V = Builder.CreateBitCast(V, PtrStructTy);
        V = Builder.CreateStructGEP(V, needsCopyDispose*2 + 4, "x");
      }
      if (VD->getType()->isReferenceType())
        V = Builder.CreateLoad(V, "tmp");
      LV = LValue::MakeAddr(V, E->getType().getCVRQualifiers(), attr);
    }
    LValue::SetObjCNonGC(LV, NonGCable);
    return LV;
  } else if (VD && VD->isFileVarDecl()) {
    llvm::Value *V = CGM.GetAddrOfGlobalVar(VD);
    if (VD->getType()->isReferenceType())
      V = Builder.CreateLoad(V, "tmp");
    LValue LV = LValue::MakeAddr(V, E->getType().getCVRQualifiers(),
                                 getContext().getObjCGCAttrKind(E->getType()));
    if (LV.isObjCStrong())
      LV.SetGlobalObjCRef(LV, true);
    return LV;
  } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(E->getDecl())) {
    llvm::Value *V = CGM.GetAddrOfFunction(GlobalDecl(FD));
    if (!FD->hasPrototype()) {
      if (const FunctionProtoType *Proto =
            FD->getType()->getAsFunctionProtoType()) {
        // Ugly case: for a K&R-style definition, the type of the definition
        // isn't the same as the type of a use.  Correct for this with a
        // bitcast.
        QualType NoProtoType =
          getContext().getFunctionNoProtoType(Proto->getResultType());
        NoProtoType = getContext().getPointerType(NoProtoType);
        V = Builder.CreateBitCast(V, ConvertType(NoProtoType), "tmp");
      }
    }
    return LValue::MakeAddr(V, E->getType().getCVRQualifiers(),
                            getContext().getObjCGCAttrKind(E->getType()));
  } else if (const ImplicitParamDecl *IPD =
               dyn_cast<ImplicitParamDecl>(E->getDecl())) {
    llvm::Value *V = LocalDeclMap[IPD];
    assert(V && "BlockVarDecl not entered in LocalDeclMap?");
    return LValue::MakeAddr(V, E->getType().getCVRQualifiers(),
                            getContext().getObjCGCAttrKind(E->getType()));
  }
  assert(0 && "Unimp declref");
  // Return an invalid LValue; the assert above ensures that this point is
  // never reached.
  return LValue();
}

LValue CodeGenFunction::EmitBlockDeclRefLValue(const BlockDeclRefExpr *E) {
  return LValue::MakeAddr(GetAddrOfBlockDecl(E),
                          E->getType().getCVRQualifiers(),
                          getContext().getObjCGCAttrKind(E->getType()));
}

LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
  // __extension__ doesn't affect lvalue-ness.
  if (E->getOpcode() == UnaryOperator::Extension)
    return EmitLValue(E->getSubExpr());

  QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
  switch (E->getOpcode()) {
  default: assert(0 && "Unknown unary operator lvalue!");
  case UnaryOperator::Deref:
    {
      QualType T = E->getSubExpr()->getType()->getPointeeType();
      assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");

      LValue LV = LValue::MakeAddr(EmitScalarExpr(E->getSubExpr()),
                                   T.getCVRQualifiers(),
                                   getContext().getObjCGCAttrKind(T));
      // We should not generate a __weak write barrier on an indirect reference
      // of a pointer to object, as in: void foo (__weak id *param); *param = 0;
      // But we continue to generate a __strong write barrier on an indirect
      // write into a pointer to object.
      if (getContext().getLangOptions().ObjC1 &&
          getContext().getLangOptions().getGCMode() != LangOptions::NonGC &&
          LV.isObjCWeak())
        LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate(getContext()));
      return LV;
    }
  case UnaryOperator::Real:
  case UnaryOperator::Imag:
    LValue LV = EmitLValue(E->getSubExpr());
    unsigned Idx = E->getOpcode() == UnaryOperator::Imag;
    return LValue::MakeAddr(Builder.CreateStructGEP(LV.getAddress(),
                                                    Idx, "idx"),
                            ExprTy.getCVRQualifiers());
  }
}

LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
  return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromLiteral(E), 0);
}

LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
  return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromObjCEncode(E), 0);
}


LValue CodeGenFunction::EmitPredefinedFunctionName(unsigned Type) {
  std::string GlobalVarName;

  switch (Type) {
  default:
    assert(0 && "Invalid type");
  case PredefinedExpr::Func:
    GlobalVarName = "__func__.";
    break;
  case PredefinedExpr::Function:
    GlobalVarName = "__FUNCTION__.";
    break;
  case PredefinedExpr::PrettyFunction:
    // FIXME: Demangle C++ method names
    GlobalVarName = "__PRETTY_FUNCTION__.";
    break;
  }

  // FIXME: This isn't right at all.  The logic for computing this should go
  // into a method on PredefinedExpr.  This would allow sema and codegen to be
  // consistent for things like sizeof(__func__) etc.
  std::string FunctionName;
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
    FunctionName = CGM.getMangledName(FD);
  } else {
    // Just get the mangled name, skipping the asm prefix if it
    // exists.
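    // (A leading '\01' marks a name that should be emitted verbatim, without
    // the target's usual global symbol prefix.)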
    FunctionName = CurFn->getName();
    if (FunctionName[0] == '\01')
      FunctionName = FunctionName.substr(1, std::string::npos);
  }

  GlobalVarName += FunctionName;
  llvm::Constant *C =
    CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str());
  return LValue::MakeAddr(C, 0);
}

LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
  switch (E->getIdentType()) {
  default:
    return EmitUnsupportedLValue(E, "predefined expression");
  case PredefinedExpr::Func:
  case PredefinedExpr::Function:
  case PredefinedExpr::PrettyFunction:
    return EmitPredefinedFunctionName(E->getIdentType());
  }
}

LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
  // The index must always be an integer, which is not an aggregate.  Emit it.
  llvm::Value *Idx = EmitScalarExpr(E->getIdx());
  QualType IdxTy = E->getIdx()->getType();
  bool IdxSigned = IdxTy->isSignedIntegerType();

  // If the base is a vector type, then we are forming a vector element lvalue
  // with this subscript.
  if (E->getBase()->getType()->isVectorType()) {
    // Emit the vector as an lvalue to get its address.
    LValue LHS = EmitLValue(E->getBase());
    assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
    Idx = Builder.CreateIntCast(Idx, llvm::Type::Int32Ty, IdxSigned, "vidx");
    return LValue::MakeVectorElt(LHS.getAddress(), Idx,
                                 E->getBase()->getType().getCVRQualifiers());
  }

  // The base must be a pointer, which is not an aggregate.  Emit it.
  llvm::Value *Base = EmitScalarExpr(E->getBase());

  // Extend or truncate the index type to 32 or 64 bits.
  unsigned IdxBitwidth = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
  if (IdxBitwidth != LLVMPointerWidth)
    Idx = Builder.CreateIntCast(Idx, VMContext.getIntegerType(LLVMPointerWidth),
                                IdxSigned, "idxprom");

  // We know that the pointer points to a type of the correct size,
  // unless the size is a VLA or Objective-C interface.
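  // In those two cases the GEP cannot compute the full element size on its
  // own, so the index is scaled explicitly first.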
  llvm::Value *Address = 0;
  if (const VariableArrayType *VAT =
        getContext().getAsVariableArrayType(E->getType())) {
    llvm::Value *VLASize = VLASizeMap[VAT];

    Idx = Builder.CreateMul(Idx, VLASize);

    QualType BaseType = getContext().getBaseElementType(VAT);

    uint64_t BaseTypeSize = getContext().getTypeSize(BaseType) / 8;
    Idx = Builder.CreateUDiv(Idx,
                             VMContext.getConstantInt(Idx->getType(),
                                                      BaseTypeSize));
    Address = Builder.CreateGEP(Base, Idx, "arrayidx");
  } else if (const ObjCInterfaceType *OIT =
               dyn_cast<ObjCInterfaceType>(E->getType())) {
    llvm::Value *InterfaceSize =
      VMContext.getConstantInt(Idx->getType(),
                               getContext().getTypeSize(OIT) / 8);

    Idx = Builder.CreateMul(Idx, InterfaceSize);

    llvm::Type *i8PTy = VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
    Address = Builder.CreateGEP(Builder.CreateBitCast(Base, i8PTy),
                                Idx, "arrayidx");
    Address = Builder.CreateBitCast(Address, Base->getType());
  } else {
    Address = Builder.CreateGEP(Base, Idx, "arrayidx");
  }

  QualType T = E->getBase()->getType()->getPointeeType();
  assert(!T.isNull() &&
         "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");

  LValue LV = LValue::MakeAddr(Address,
                               T.getCVRQualifiers(),
                               getContext().getObjCGCAttrKind(T));
  if (getContext().getLangOptions().ObjC1 &&
      getContext().getLangOptions().getGCMode() != LangOptions::NonGC)
    LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate(getContext()));
  return LV;
}

static
llvm::Constant *GenerateConstantVector(llvm::LLVMContext &VMContext,
                                       llvm::SmallVector<unsigned, 4> &Elts) {
  llvm::SmallVector<llvm::Constant *, 4> CElts;

  for (unsigned i = 0, e = Elts.size(); i != e; ++i)
    CElts.push_back(VMContext.getConstantInt(llvm::Type::Int32Ty, Elts[i]));

  return VMContext.getConstantVector(&CElts[0], CElts.size());
}

LValue CodeGenFunction::
EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
  // Emit the base vector as an l-value.
  LValue Base;

  // ExtVectorElementExpr's base can either be a vector or pointer to vector.
  if (!E->isArrow()) {
    assert(E->getBase()->getType()->isVectorType());
    Base = EmitLValue(E->getBase());
  } else {
    const PointerType *PT = E->getBase()->getType()->getAsPointerType();
    llvm::Value *Ptr = EmitScalarExpr(E->getBase());
    Base = LValue::MakeAddr(Ptr, PT->getPointeeType().getCVRQualifiers());
  }

  // Encode the element access list into a vector of unsigned indices.
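  // For example, for 'V.zyx' this produces the indices {2, 1, 0}.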
  llvm::SmallVector<unsigned, 4> Indices;
  E->getEncodedElementAccess(Indices);

  if (Base.isSimple()) {
    llvm::Constant *CV = GenerateConstantVector(VMContext, Indices);
    return LValue::MakeExtVectorElt(Base.getAddress(), CV,
                                    Base.getQualifiers());
  }
  assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");

  llvm::Constant *BaseElts = Base.getExtVectorElts();
  llvm::SmallVector<llvm::Constant *, 4> CElts;

  for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
    if (isa<llvm::ConstantAggregateZero>(BaseElts))
      CElts.push_back(VMContext.getConstantInt(llvm::Type::Int32Ty, 0));
    else
      CElts.push_back(BaseElts->getOperand(Indices[i]));
  }
  llvm::Constant *CV = VMContext.getConstantVector(&CElts[0], CElts.size());
  return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV,
                                  Base.getQualifiers());
}

LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
  bool isUnion = false;
  bool isIvar = false;
  bool isNonGC = false;
  Expr *BaseExpr = E->getBase();
  llvm::Value *BaseValue = NULL;
  unsigned CVRQualifiers = 0;

  // If this is s.x, emit s as an lvalue.  If it is s->x, emit s as a scalar.
  if (E->isArrow()) {
    BaseValue = EmitScalarExpr(BaseExpr);
    const PointerType *PTy =
      BaseExpr->getType()->getAsPointerType();
    if (PTy->getPointeeType()->isUnionType())
      isUnion = true;
    CVRQualifiers = PTy->getPointeeType().getCVRQualifiers();
  } else if (isa<ObjCPropertyRefExpr>(BaseExpr) ||
             isa<ObjCKVCRefExpr>(BaseExpr)) {
    RValue RV = EmitObjCPropertyGet(BaseExpr);
    BaseValue = RV.getAggregateAddr();
    if (BaseExpr->getType()->isUnionType())
      isUnion = true;
    CVRQualifiers = BaseExpr->getType().getCVRQualifiers();
  } else {
    LValue BaseLV = EmitLValue(BaseExpr);
    if (BaseLV.isObjCIvar())
      isIvar = true;
    if (BaseLV.isNonGC())
      isNonGC = true;
    // FIXME: this isn't right for bitfields.
    BaseValue = BaseLV.getAddress();
    if (BaseExpr->getType()->isUnionType())
      isUnion = true;
    CVRQualifiers = BaseExpr->getType().getCVRQualifiers();
  }

  FieldDecl *Field = dyn_cast<FieldDecl>(E->getMemberDecl());
  // FIXME: Handle non-field member expressions
  assert(Field && "No code generation for non-field member references");
  LValue MemExpLV = EmitLValueForField(BaseValue, Field, isUnion,
                                       CVRQualifiers);
  LValue::SetObjCIvar(MemExpLV, isIvar);
  LValue::SetObjCNonGC(MemExpLV, isNonGC);
  return MemExpLV;
}

LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value *BaseValue,
                                              FieldDecl *Field,
                                              unsigned CVRQualifiers) {
  unsigned idx = CGM.getTypes().getLLVMFieldNo(Field);
  // FIXME: CodeGenTypes should expose a method to get the appropriate type for
  // FieldTy (the appropriate type is ABI-dependent).
  const llvm::Type *FieldTy =
    CGM.getTypes().ConvertTypeForMem(Field->getType());
  const llvm::PointerType *BaseTy =
    cast<llvm::PointerType>(BaseValue->getType());
  unsigned AS = BaseTy->getAddressSpace();
  BaseValue = Builder.CreateBitCast(BaseValue,
                                    VMContext.getPointerType(FieldTy, AS),
                                    "tmp");
  llvm::Value *V = Builder.CreateGEP(BaseValue,
                                     VMContext.getConstantInt(llvm::Type::Int32Ty, idx),
                                     "tmp");

  CodeGenTypes::BitFieldInfo bitFieldInfo =
    CGM.getTypes().getBitFieldInfo(Field);
  return LValue::MakeBitfield(V, bitFieldInfo.Begin, bitFieldInfo.Size,
                              Field->getType()->isSignedIntegerType(),
                              Field->getType().getCVRQualifiers()|CVRQualifiers);
}

LValue CodeGenFunction::EmitLValueForField(llvm::Value *BaseValue,
                                           FieldDecl *Field,
                                           bool isUnion,
                                           unsigned CVRQualifiers) {
  if (Field->isBitField())
    return EmitLValueForBitfield(BaseValue, Field, CVRQualifiers);

  unsigned idx = CGM.getTypes().getLLVMFieldNo(Field);
  llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx, "tmp");

  // Match union field type.
  if (isUnion) {
    const llvm::Type *FieldTy =
      CGM.getTypes().ConvertTypeForMem(Field->getType());
    const llvm::PointerType *BaseTy =
      cast<llvm::PointerType>(BaseValue->getType());
    unsigned AS = BaseTy->getAddressSpace();
    V = Builder.CreateBitCast(V,
                              VMContext.getPointerType(FieldTy, AS),
                              "tmp");
  }
  if (Field->getType()->isReferenceType())
    V = Builder.CreateLoad(V, "tmp");

  QualType::GCAttrTypes attr = QualType::GCNone;
  if (CGM.getLangOptions().ObjC1 &&
      CGM.getLangOptions().getGCMode() != LangOptions::NonGC) {
    QualType Ty = Field->getType();
    attr = Ty.getObjCGCAttr();
    if (attr != QualType::GCNone) {
      // __weak attribute on a field is ignored.
      if (attr == QualType::Weak)
        attr = QualType::GCNone;
    } else if (getContext().isObjCObjectPointerType(Ty))
      attr = QualType::Strong;
  }
  LValue LV =
    LValue::MakeAddr(V,
                     Field->getType().getCVRQualifiers()|CVRQualifiers,
                     attr);
  return LV;
}

LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E) {
  const llvm::Type *LTy = ConvertType(E->getType());
  llvm::Value *DeclPtr = CreateTempAlloca(LTy, ".compoundliteral");

  const Expr *InitExpr = E->getInitializer();
  LValue Result = LValue::MakeAddr(DeclPtr, E->getType().getCVRQualifiers());

  if (E->getType()->isComplexType()) {
    EmitComplexExprIntoAddr(InitExpr, DeclPtr, false);
  } else if (hasAggregateLLVMType(E->getType())) {
    EmitAnyExpr(InitExpr, DeclPtr, false);
  } else {
    EmitStoreThroughLValue(EmitAnyExpr(InitExpr), Result, E->getType());
  }

  return Result;
}

LValue CodeGenFunction::EmitConditionalOperator(const ConditionalOperator *E) {
  // We don't handle vectors yet.
  if (E->getType()->isVectorType())
    return EmitUnsupportedLValue(E, "conditional operator");

  // ?: here should be an aggregate.
  assert((hasAggregateLLVMType(E->getType()) &&
          !E->getType()->isAnyComplexType()) &&
         "Unexpected conditional operator!");

  llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
  EmitAggExpr(E, Temp, false);

  return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers(),
                          getContext().getObjCGCAttrKind(E->getType()));
}

/// EmitCastLValue - Casts are never lvalues.  If a cast is needed by the code
/// generator in an lvalue context, then it must mean that we need the address
/// of an aggregate in order to access one of its fields.  This can happen for
/// all the reasons that casts are permitted with aggregate result, including
/// noop aggregate casts, and cast from scalar to union.
LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
  // If this is an aggregate-to-aggregate cast, just use the input's address as
  // the lvalue.
  if (getContext().hasSameUnqualifiedType(E->getType(),
                                          E->getSubExpr()->getType()))
    return EmitLValue(E->getSubExpr());

  // Otherwise, we must have a cast from scalar to union.
  assert(E->getType()->isUnionType() && "Expected scalar-to-union cast");

  // Casts are only lvalues when the source and destination types are the same.
  llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
  EmitAnyExpr(E->getSubExpr(), Temp, false);

  return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers(),
                          getContext().getObjCGCAttrKind(E->getType()));
}

//===--------------------------------------------------------------------===//
//                             Expression Emission
//===--------------------------------------------------------------------===//


RValue CodeGenFunction::EmitCallExpr(const CallExpr *E) {
  // Builtins never have block type.
  if (E->getCallee()->getType()->isBlockPointerType())
    return EmitBlockCallExpr(E);

  if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E))
    return EmitCXXMemberCallExpr(CE);

  const Decl *TargetDecl = 0;
  if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E->getCallee())) {
    if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) {
      TargetDecl = DRE->getDecl();
      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(TargetDecl))
        if (unsigned builtinID = FD->getBuiltinID(getContext()))
          return EmitBuiltinExpr(FD, builtinID, E);
    }
  }

  if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E))
    if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
      return EmitCXXOperatorMemberCallExpr(CE, MD);

  llvm::Value *Callee = EmitScalarExpr(E->getCallee());
  return EmitCall(Callee, E->getCallee()->getType(),
                  E->arg_begin(), E->arg_end(), TargetDecl);
}

LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
  // Comma expressions just emit their LHS then their RHS as an l-value.
  if (E->getOpcode() == BinaryOperator::Comma) {
    EmitAnyExpr(E->getLHS());
    return EmitLValue(E->getRHS());
  }

  // Can only get l-value for binary operator expressions which are a
  // simple assignment of aggregate type.
  if (E->getOpcode() != BinaryOperator::Assign)
    return EmitUnsupportedLValue(E, "binary l-value expression");

  llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
  EmitAggExpr(E, Temp, false);
  // FIXME: Are these qualifiers correct?
  return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers(),
                          getContext().getObjCGCAttrKind(E->getType()));
}

LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
  RValue RV = EmitCallExpr(E);

  if (RV.isScalar()) {
    assert(E->getCallReturnType()->isReferenceType() &&
           "Can't have a scalar return unless the return type is a "
           "reference type!");

    return LValue::MakeAddr(RV.getScalarVal(), E->getType().getCVRQualifiers(),
                            getContext().getObjCGCAttrKind(E->getType()));
  }

  return LValue::MakeAddr(RV.getAggregateAddr(),
                          E->getType().getCVRQualifiers(),
                          getContext().getObjCGCAttrKind(E->getType()));
}

LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
  // FIXME: This shouldn't require another copy.
  llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
  EmitAggExpr(E, Temp, false);
  return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers());
}

LValue
CodeGenFunction::EmitCXXConditionDeclLValue(const CXXConditionDeclExpr *E) {
  EmitLocalBlockVarDecl(*E->getVarDecl());
  return EmitDeclRefLValue(E);
}

LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
  llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(E->getType()), "tmp");
  EmitCXXConstructExpr(Temp, E);
  return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers());
}

LValue
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
  LValue LV = EmitLValue(E->getSubExpr());

  PushCXXTemporary(E->getTemporary(), LV.getAddress());

  return LV;
}

LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
  // Can only get l-value for message expression returning aggregate type
  RValue RV = EmitObjCMessageExpr(E);
  // FIXME: can this be volatile?
  return LValue::MakeAddr(RV.getAggregateAddr(),
                          E->getType().getCVRQualifiers(),
                          getContext().getObjCGCAttrKind(E->getType()));
}

llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
                                             const ObjCIvarDecl *Ivar) {
  return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
}

LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
                                          llvm::Value *BaseValue,
                                          const ObjCIvarDecl *Ivar,
                                          unsigned CVRQualifiers) {
  return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
                                                   Ivar, CVRQualifiers);
}

LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
  // FIXME: A lot of the code below could be shared with EmitMemberExpr.
  llvm::Value *BaseValue = 0;
  const Expr *BaseExpr = E->getBase();
  unsigned CVRQualifiers = 0;
  QualType ObjectTy;
  if (E->isArrow()) {
    BaseValue = EmitScalarExpr(BaseExpr);
    ObjectTy = BaseExpr->getType()->getPointeeType();
    CVRQualifiers = ObjectTy.getCVRQualifiers();
  } else {
    LValue BaseLV = EmitLValue(BaseExpr);
    // FIXME: this isn't right for bitfields.
    BaseValue = BaseLV.getAddress();
    ObjectTy = BaseExpr->getType();
    CVRQualifiers = ObjectTy.getCVRQualifiers();
  }

  return EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(), CVRQualifiers);
}

LValue
CodeGenFunction::EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E) {
  // This is a special l-value that just issues sends when we load or
  // store through it.
  return LValue::MakePropertyRef(E, E->getType().getCVRQualifiers());
}

LValue
CodeGenFunction::EmitObjCKVCRefLValue(const ObjCKVCRefExpr *E) {
  // This is a special l-value that just issues sends when we load or
  // store through it.
  return LValue::MakeKVCRef(E, E->getType().getCVRQualifiers());
}

LValue
CodeGenFunction::EmitObjCSuperExprLValue(const ObjCSuperExpr *E) {
  return EmitUnsupportedLValue(E, "use of super");
}

LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
  // Can only get an l-value for a statement expression returning aggregate type.
  RValue RV = EmitAnyExprToTemp(E);
  // FIXME: can this be volatile?
  return LValue::MakeAddr(RV.getAggregateAddr(),
                          E->getType().getCVRQualifiers(),
                          getContext().getObjCGCAttrKind(E->getType()));
}


RValue CodeGenFunction::EmitCall(llvm::Value *Callee, QualType CalleeType,
                                 CallExpr::const_arg_iterator ArgBeg,
                                 CallExpr::const_arg_iterator ArgEnd,
                                 const Decl *TargetDecl) {
  // Get the actual function type.  The callee type will always be a
  // pointer to function type or a block pointer type.
  assert(CalleeType->isFunctionPointerType() &&
         "Call must have function pointer type!");

  QualType FnType = CalleeType->getAsPointerType()->getPointeeType();
  QualType ResultType = FnType->getAsFunctionType()->getResultType();

  CallArgList Args;
  EmitCallArgs(Args, FnType->getAsFunctionProtoType(), ArgBeg, ArgEnd);

  return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args),
                  Callee, Args, TargetDecl);
}