CGExpr.cpp revision af521dae8ee15ebf4ecceae3d25f66a475104a07
//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGCall.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGRecordLayout.h"
#include "CGObjCRuntime.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
  unsigned addressSpace =
    cast<llvm::PointerType>(value->getType())->getAddressSpace();

  llvm::PointerType *destType = Int8PtrTy;
  if (addressSpace)
    destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);

  if (value->getType() == destType) return value;
  return Builder.CreateBitCast(value, destType);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
                                                    const Twine &Name) {
  if (!Builder.isNamePreserving())
    return new llvm::AllocaInst(Ty, 0, "", AllocaInsertPt);
  return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
}

void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var,
                                     llvm::Value *Init) {
  llvm::StoreInst *Store = new llvm::StoreInst(Init, Var);
  llvm::BasicBlock *Block = AllocaInsertPt->getParent();
  Block->getInstList().insertAfter(&*AllocaInsertPt, Store);
}

llvm::AllocaInst *CodeGenFunction::CreateIRTemp(QualType Ty,
                                                const Twine &Name) {
  llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
  // FIXME: Should we prefer the preferred type alignment here?
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  Alloc->setAlignment(Align.getQuantity());
  return Alloc;
}

llvm::AllocaInst *CodeGenFunction::CreateMemTemp(QualType Ty,
                                                 const Twine &Name) {
  llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
  // FIXME: Should we prefer the preferred type alignment here?
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  Alloc->setAlignment(Align.getQuantity());
  return Alloc;
}

/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
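///
/// Member pointers need special handling: a null member pointer is not
/// necessarily an all-zero bit pattern (the Itanium ABI, for example,
/// represents a null pointer to data member as -1), so the null test is
/// delegated to the C++ ABI object below.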
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
    llvm::Value *MemPtr = EmitScalarExpr(E);
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
  }

  QualType BoolTy = getContext().BoolTy;
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy);
}

/// EmitIgnoredExpr - Emit code to compute the specified expression,
/// ignoring the result.
void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
  if (E->isRValue())
    return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);

  // Just emit it as an l-value and drop the result.
  EmitLValue(E);
}

/// EmitAnyExpr - Emit code to compute the specified expression which
/// can have any type.  The result is returned as an RValue struct.
/// If this is an aggregate expression, AggSlot indicates where the
/// result should be returned.
RValue CodeGenFunction::EmitAnyExpr(const Expr *E, AggValueSlot AggSlot,
                                    bool IgnoreResult) {
  if (!hasAggregateLLVMType(E->getType()))
    return RValue::get(EmitScalarExpr(E, IgnoreResult));
  else if (E->getType()->isAnyComplexType())
    return RValue::getComplex(EmitComplexExpr(E, IgnoreResult, IgnoreResult));

  EmitAggExpr(E, AggSlot, IgnoreResult);
  return AggSlot.asRValue();
}

/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
  AggValueSlot AggSlot = AggValueSlot::ignored();

  if (hasAggregateLLVMType(E->getType()) &&
      !E->getType()->isAnyComplexType())
    AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
  return EmitAnyExpr(E, AggSlot);
}

/// EmitAnyExprToMem - Evaluate an expression into a given memory
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
                                       llvm::Value *Location,
                                       Qualifiers Quals,
                                       bool IsInit) {
  // FIXME: This function should take an LValue as an argument.
  if (E->getType()->isAnyComplexType()) {
    EmitComplexExprIntoAddr(E, Location, Quals.hasVolatile());
  } else if (hasAggregateLLVMType(E->getType())) {
    CharUnits Alignment = getContext().getTypeAlignInChars(E->getType());
    EmitAggExpr(E, AggValueSlot::forAddr(Location, Alignment, Quals,
                                         AggValueSlot::IsDestructed_t(IsInit),
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsAliased_t(!IsInit)));
  } else {
    RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
    LValue LV = MakeAddrLValue(Location, E->getType());
    EmitStoreThroughLValue(RV, LV);
  }
}

namespace {
/// \brief An adjustment to be made to the temporary created when emitting a
/// reference binding, which accesses a particular subobject of that temporary.
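///
/// For example (illustrative): given `struct D : B { int x; };`, binding
/// `const B &b = D();` requires a derived-to-base adjustment, while binding
/// `const int &i = D().x;` requires a field adjustment.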
  struct SubobjectAdjustment {
    enum { DerivedToBaseAdjustment, FieldAdjustment } Kind;

    union {
      struct {
        const CastExpr *BasePath;
        const CXXRecordDecl *DerivedClass;
      } DerivedToBase;

      FieldDecl *Field;
    };

    SubobjectAdjustment(const CastExpr *BasePath,
                        const CXXRecordDecl *DerivedClass)
      : Kind(DerivedToBaseAdjustment) {
      DerivedToBase.BasePath = BasePath;
      DerivedToBase.DerivedClass = DerivedClass;
    }

    SubobjectAdjustment(FieldDecl *Field)
      : Kind(FieldAdjustment) {
      this->Field = Field;
    }
  };
}

static llvm::Value *
CreateReferenceTemporary(CodeGenFunction &CGF, QualType Type,
                         const NamedDecl *InitializedDecl) {
  if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
    if (VD->hasGlobalStorage()) {
      llvm::SmallString<256> Name;
      llvm::raw_svector_ostream Out(Name);
      CGF.CGM.getCXXABI().getMangleContext().mangleReferenceTemporary(VD, Out);
      Out.flush();

      llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type);

      // Create the reference temporary.
      llvm::GlobalValue *RefTemp =
        new llvm::GlobalVariable(CGF.CGM.getModule(),
                                 RefTempTy, /*isConstant=*/false,
                                 llvm::GlobalValue::InternalLinkage,
                                 llvm::Constant::getNullValue(RefTempTy),
                                 Name.str());
      return RefTemp;
    }
  }

  return CGF.CreateMemTemp(Type, "ref.tmp");
}

static llvm::Value *
EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
                            llvm::Value *&ReferenceTemporary,
                            const CXXDestructorDecl *&ReferenceTemporaryDtor,
                            QualType &ObjCARCReferenceLifetimeType,
                            const NamedDecl *InitializedDecl) {
  // Look through single-element init lists that claim to be lvalues. They're
  // just syntactic wrappers in this case.
  if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E)) {
    if (ILE->getNumInits() == 1 && ILE->isGLValue())
      E = ILE->getInit(0);
  }

  // Look through expressions for materialized temporaries (for now).
  if (const MaterializeTemporaryExpr *M
      = dyn_cast<MaterializeTemporaryExpr>(E)) {
    // Objective-C++ ARC:
    //   If we are binding a reference to a temporary that has ownership, we
    //   need to perform retain/release operations on the temporary.
    if (CGF.getContext().getLangOptions().ObjCAutoRefCount &&
        E->getType()->isObjCLifetimeType() &&
        (E->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
         E->getType().getObjCLifetime() == Qualifiers::OCL_Weak ||
         E->getType().getObjCLifetime() == Qualifiers::OCL_Autoreleasing))
      ObjCARCReferenceLifetimeType = E->getType();

    E = M->GetTemporaryExpr();
  }

  if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
    E = DAE->getExpr();

  if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(E)) {
    CGF.enterFullExpression(EWC);
    CodeGenFunction::RunCleanupsScope Scope(CGF);

    return EmitExprForReferenceBinding(CGF, EWC->getSubExpr(),
                                       ReferenceTemporary,
                                       ReferenceTemporaryDtor,
                                       ObjCARCReferenceLifetimeType,
                                       InitializedDecl);
  }

  RValue RV;
  if (E->isGLValue()) {
    // Emit the expression as an lvalue.
    LValue LV = CGF.EmitLValue(E);

    if (LV.isSimple())
      return LV.getAddress();

    // We have to load the lvalue.
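    // (A non-simple lvalue here is something like a bitfield or a vector
    // element, which has no directly addressable storage of its own; the
    // value is loaded as an rvalue and spilled into a temporary at the
    // bottom of this function.)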
    RV = CGF.EmitLoadOfLValue(LV);
  } else {
    if (!ObjCARCReferenceLifetimeType.isNull()) {
      ReferenceTemporary = CreateReferenceTemporary(CGF,
                                                 ObjCARCReferenceLifetimeType,
                                                    InitializedDecl);

      LValue RefTempDst = CGF.MakeAddrLValue(ReferenceTemporary,
                                             ObjCARCReferenceLifetimeType);

      CGF.EmitScalarInit(E, dyn_cast_or_null<ValueDecl>(InitializedDecl),
                         RefTempDst, false);

      bool ExtendsLifeOfTemporary = false;
      if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
        if (Var->extendsLifetimeOfTemporary())
          ExtendsLifeOfTemporary = true;
      } else if (InitializedDecl && isa<FieldDecl>(InitializedDecl)) {
        ExtendsLifeOfTemporary = true;
      }

      if (!ExtendsLifeOfTemporary) {
        // Since the lifetime of this temporary isn't going to be extended,
        // we need to clean it up ourselves at the end of the full expression.
        switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
        case Qualifiers::OCL_None:
        case Qualifiers::OCL_ExplicitNone:
        case Qualifiers::OCL_Autoreleasing:
          break;

        case Qualifiers::OCL_Strong: {
          assert(!ObjCARCReferenceLifetimeType->isArrayType());
          CleanupKind cleanupKind = CGF.getARCCleanupKind();
          CGF.pushDestroy(cleanupKind,
                          ReferenceTemporary,
                          ObjCARCReferenceLifetimeType,
                          CodeGenFunction::destroyARCStrongImprecise,
                          cleanupKind & EHCleanup);
          break;
        }

        case Qualifiers::OCL_Weak:
          assert(!ObjCARCReferenceLifetimeType->isArrayType());
          CGF.pushDestroy(NormalAndEHCleanup,
                          ReferenceTemporary,
                          ObjCARCReferenceLifetimeType,
                          CodeGenFunction::destroyARCWeak,
                          /*useEHCleanupForArray*/ true);
          break;
        }

        ObjCARCReferenceLifetimeType = QualType();
      }

      return ReferenceTemporary;
    }

    SmallVector<SubobjectAdjustment, 2> Adjustments;
    while (true) {
      E = E->IgnoreParens();

      if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
        if ((CE->getCastKind() == CK_DerivedToBase ||
             CE->getCastKind() == CK_UncheckedDerivedToBase) &&
            E->getType()->isRecordType()) {
          E = CE->getSubExpr();
          CXXRecordDecl *Derived
            = cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl());
          Adjustments.push_back(SubobjectAdjustment(CE, Derived));
          continue;
        }

        if (CE->getCastKind() == CK_NoOp) {
          E = CE->getSubExpr();
          continue;
        }
      } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
        if (!ME->isArrow() && ME->getBase()->isRValue()) {
          assert(ME->getBase()->getType()->isRecordType());
          if (FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
            E = ME->getBase();
            Adjustments.push_back(SubobjectAdjustment(Field));
            continue;
          }
        }
      }

      if (const OpaqueValueExpr *opaque = dyn_cast<OpaqueValueExpr>(E))
        if (opaque->getType()->isRecordType())
          return CGF.EmitOpaqueValueLValue(opaque).getAddress();

      // Nothing changed.
      break;
    }

    // Create a reference temporary if necessary.
    AggValueSlot AggSlot = AggValueSlot::ignored();
    if (CGF.hasAggregateLLVMType(E->getType()) &&
        !E->getType()->isAnyComplexType()) {
      ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
                                                    InitializedDecl);
      CharUnits Alignment = CGF.getContext().getTypeAlignInChars(E->getType());
      AggValueSlot::IsDestructed_t isDestructed
        = AggValueSlot::IsDestructed_t(InitializedDecl != 0);
      AggSlot = AggValueSlot::forAddr(ReferenceTemporary, Alignment,
                                      Qualifiers(), isDestructed,
                                      AggValueSlot::DoesNotNeedGCBarriers,
                                      AggValueSlot::IsNotAliased);
    }

    if (InitializedDecl) {
      // Get the destructor for the reference temporary.
      if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
        CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
        if (!ClassDecl->hasTrivialDestructor())
          ReferenceTemporaryDtor = ClassDecl->getDestructor();
      }
    }

    RV = CGF.EmitAnyExpr(E, AggSlot);

    // Check if we need to perform derived-to-base casts and/or field accesses,
    // to get from the temporary object we created (and, potentially, for which
    // we extended the lifetime) to the subobject we're binding the reference to.
    if (!Adjustments.empty()) {
      llvm::Value *Object = RV.getAggregateAddr();
      for (unsigned I = Adjustments.size(); I != 0; --I) {
        SubobjectAdjustment &Adjustment = Adjustments[I-1];
        switch (Adjustment.Kind) {
        case SubobjectAdjustment::DerivedToBaseAdjustment:
          Object =
              CGF.GetAddressOfBaseClass(Object,
                                        Adjustment.DerivedToBase.DerivedClass,
                              Adjustment.DerivedToBase.BasePath->path_begin(),
                              Adjustment.DerivedToBase.BasePath->path_end(),
                                        /*NullCheckValue=*/false);
          break;

        case SubobjectAdjustment::FieldAdjustment: {
          LValue LV =
            CGF.EmitLValueForField(Object, Adjustment.Field, 0);
          if (LV.isSimple()) {
            Object = LV.getAddress();
            break;
          }

          // For non-simple lvalues, we actually have to create a copy of
          // the object we're binding to.
          QualType T = Adjustment.Field->getType().getNonReferenceType()
                                                  .getUnqualifiedType();
          Object = CreateReferenceTemporary(CGF, T, InitializedDecl);
          LValue TempLV = CGF.MakeAddrLValue(Object,
                                             Adjustment.Field->getType());
          CGF.EmitStoreThroughLValue(CGF.EmitLoadOfLValue(LV), TempLV);
          break;
        }

        }
      }

      return Object;
    }
  }

  if (RV.isAggregate())
    return RV.getAggregateAddr();

  // Create a temporary variable that we can bind the reference to.
  ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
                                                InitializedDecl);

  unsigned Alignment =
    CGF.getContext().getTypeAlignInChars(E->getType()).getQuantity();
  if (RV.isScalar())
    CGF.EmitStoreOfScalar(RV.getScalarVal(), ReferenceTemporary,
                          /*Volatile=*/false, Alignment, E->getType());
  else
    CGF.StoreComplexToAddr(RV.getComplexVal(), ReferenceTemporary,
                           /*Volatile=*/false);
  return ReferenceTemporary;
}

RValue
CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
                                            const NamedDecl *InitializedDecl) {
  llvm::Value *ReferenceTemporary = 0;
  const CXXDestructorDecl *ReferenceTemporaryDtor = 0;
  QualType ObjCARCReferenceLifetimeType;
  llvm::Value *Value = EmitExprForReferenceBinding(*this, E, ReferenceTemporary,
                                                   ReferenceTemporaryDtor,
                                                   ObjCARCReferenceLifetimeType,
                                                   InitializedDecl);
  if (!ReferenceTemporaryDtor && ObjCARCReferenceLifetimeType.isNull())
    return RValue::get(Value);

  // Make sure to call the destructor for the reference temporary.
  const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl);
  if (VD && VD->hasGlobalStorage()) {
    if (ReferenceTemporaryDtor) {
      llvm::Constant *DtorFn =
        CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
      EmitCXXGlobalDtorRegistration(DtorFn,
                                    cast<llvm::Constant>(ReferenceTemporary));
    } else {
      assert(!ObjCARCReferenceLifetimeType.isNull());
      // Note: We intentionally do not register a global "destructor" to
      // release the object.
    }

    return RValue::get(Value);
  }

  if (ReferenceTemporaryDtor)
    PushDestructorCleanup(ReferenceTemporaryDtor, ReferenceTemporary);
  else {
    switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
      llvm_unreachable(
                      "Not a reference temporary that needs to be deallocated");
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      // Nothing to do.
      break;

    case Qualifiers::OCL_Strong: {
      bool precise = VD && VD->hasAttr<ObjCPreciseLifetimeAttr>();
      CleanupKind cleanupKind = getARCCleanupKind();
      // This local is a GCC and MSVC compiler workaround.
      Destroyer *destroyer = precise ? &destroyARCStrongPrecise :
                                       &destroyARCStrongImprecise;
      pushDestroy(cleanupKind, ReferenceTemporary, ObjCARCReferenceLifetimeType,
                  *destroyer, cleanupKind & EHCleanup);
      break;
    }

    case Qualifiers::OCL_Weak: {
      // This local is a GCC and MSVC compiler workaround.
      Destroyer *destroyer = &destroyARCWeak;
      // __weak objects always get EH cleanups; otherwise, exceptions
      // could cause really nasty crashes instead of mere leaks.
      pushDestroy(NormalAndEHCleanup, ReferenceTemporary,
                  ObjCARCReferenceLifetimeType, *destroyer, true);
      break;
    }
    }
  }

  return RValue::get(Value);
}

/// getAccessedFieldNo - Given an encoded value and a result number, return the
/// input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  if (isa<llvm::ConstantAggregateZero>(Elts))
    return 0;

  return cast<llvm::ConstantInt>(Elts->getOperand(Idx))->getZExtValue();
}

void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
  if (!CatchUndefined)
    return;

  // This needs to be to the standard address space.
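  // (llvm.objectsize takes an i8* argument and returns -1 when the object
  // size is unknown, which is why the pointer is bitcast here and the
  // unknown case below branches straight to the continuation block.)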
  Address = Builder.CreateBitCast(Address, Int8PtrTy);

  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, IntPtrTy);

  // In time, people may want to control this and use a 1 here.
  llvm::Value *Arg = Builder.getFalse();
  llvm::Value *C = Builder.CreateCall2(F, Address, Arg);
  llvm::BasicBlock *Cont = createBasicBlock();
  llvm::BasicBlock *Check = createBasicBlock();
  llvm::Value *NegativeOne = llvm::ConstantInt::get(IntPtrTy, -1ULL);
  Builder.CreateCondBr(Builder.CreateICmpEQ(C, NegativeOne), Cont, Check);

  EmitBlock(Check);
  Builder.CreateCondBr(Builder.CreateICmpUGE(C,
                                        llvm::ConstantInt::get(IntPtrTy, Size)),
                       Cont, getTrapBB());
  EmitBlock(Cont);
}

CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                         bool isInc, bool isPre) {
  ComplexPairTy InVal = LoadComplexFromAddr(LV.getAddress(),
                                            LV.isVolatileQualified());

  llvm::Value *NextVal;
  if (isa<llvm::IntegerType>(InVal.first->getType())) {
    uint64_t AmountVal = isInc ? 1 : -1;
    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  } else {
    QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
    llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
    if (!isInc)
      FVal.changeSign();
    NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  }

  ComplexPairTy IncVal(NextVal, InVal.second);

  // Store the updated result through the lvalue.
  StoreComplexToAddr(IncVal, LV.getAddress(), LV.isVolatileQualified());

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? IncVal : InVal;
}

//===----------------------------------------------------------------------===//
//                         LValue Expression Emission
//===----------------------------------------------------------------------===//

RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType())
    return RValue::get(0);

  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    llvm::Type *EltTy = ConvertType(CTy->getElementType());
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  }

  // If this is a use of an undefined aggregate type, the aggregate must have an
  // identifiable address.  Just because the contents of the value are undefined
  // doesn't mean that the address can't be taken and compared.
  if (hasAggregateLLVMType(Ty)) {
    llvm::Value *DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
    return RValue::getAggregate(DestPtr);
  }

  return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
}

RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}

LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
  return MakeAddrLValue(llvm::UndefValue::get(Ty), E->getType());
}

LValue CodeGenFunction::EmitCheckedLValue(const Expr *E) {
  LValue LV = EmitLValue(E);
  if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
    EmitCheck(LV.getAddress(),
              getContext().getTypeSizeInChars(E->getType()).getQuantity());
  return LV;
}

/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield reference.
/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
/// an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of the
/// LLVM value is known: For example, it may not be a pointer to an integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed size,
/// this method guarantees that the returned pointer type will point to an LLVM
/// type of the same size as the lvalue's type.  If the lvalue has a variable
/// length type, this is not possible.
///
LValue CodeGenFunction::EmitLValue(const Expr *E) {
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::ObjCPropertyRefExprClass:
    llvm_unreachable("cannot emit a property reference directly");

  case Expr::ObjCSelectorExprClass:
    return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
  case Expr::ObjCIsaExprClass:
    return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CompoundAssignOperatorClass:
    if (!E->getType()->isAnyComplexType())
      return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
    return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ParenExprClass:
    return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
  case Expr::GenericSelectionExprClass:
    return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
  case Expr::PseudoObjectExprClass:
    return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
  case Expr::InitListExprClass:
    assert(cast<InitListExpr>(E)->getNumInits() == 1 &&
           "Only single-element init list can be lvalue.");
    return EmitLValue(cast<InitListExpr>(E)->getInit(0));

  case Expr::BlockDeclRefExprClass:
    return EmitBlockDeclRefLValue(cast<BlockDeclRefExpr>(E));

  case Expr::CXXTemporaryObjectExprClass:
  case Expr::CXXConstructExprClass:
    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
  case Expr::CXXBindTemporaryExprClass:
    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));

  case Expr::ExprWithCleanupsClass: {
    const ExprWithCleanups *cleanups = cast<ExprWithCleanups>(E);
    enterFullExpression(cleanups);
    RunCleanupsScope Scope(*this);
    return EmitLValue(cleanups->getSubExpr());
  }

  case Expr::CXXScalarValueInitExprClass:
    return EmitNullInitializationLValue(cast<CXXScalarValueInitExpr>(E));
  case Expr::CXXDefaultArgExprClass:
    return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
  case Expr::CXXTypeidExprClass:
    return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));

  case Expr::ObjCMessageExprClass:
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
  case Expr::ObjCIvarRefExprClass:
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
  case Expr::StmtExprClass:
    return EmitStmtExprLValue(cast<StmtExpr>(E));
  case Expr::UnaryOperatorClass:
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
  case Expr::ArraySubscriptExprClass:
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
  case Expr::ExtVectorElementExprClass:
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
  case Expr::MemberExprClass:
    return EmitMemberExpr(cast<MemberExpr>(E));
  case Expr::CompoundLiteralExprClass:
    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
  case Expr::BinaryConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
  case Expr::OpaqueValueExprClass:
    return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
  case Expr::SubstNonTypeTemplateParmExprClass:
    return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
  case Expr::ObjCBridgedCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));

  case Expr::MaterializeTemporaryExprClass:
    return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
  }
}

llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue) {
  return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
                          lvalue.getAlignment().getQuantity(),
                          lvalue.getType(), lvalue.getTBAAInfo());
}

llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
                                               unsigned Alignment, QualType Ty,
                                               llvm::MDNode *TBAAInfo) {
  llvm::LoadInst *Load = Builder.CreateLoad(Addr);
  if (Volatile)
    Load->setVolatile(true);
  if (Alignment)
    Load->setAlignment(Alignment);
  if (TBAAInfo)
    CGM.DecorateInstruction(Load, TBAAInfo);
  // If this is an atomic type, all normal reads must be atomic
  if (Ty->isAtomicType())
    Load->setAtomic(llvm::SequentiallyConsistent);

  return EmitFromMemory(Load, Ty);
}

static bool isBooleanUnderlyingType(QualType Ty) {
  if (const EnumType *ET = dyn_cast<EnumType>(Ty))
    return ET->getDecl()->getIntegerType()->isBooleanType();
  return false;
}

llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
  if (Ty->isBooleanType() || isBooleanUnderlyingType(Ty)) {
    // This should really always be an i1, but sometimes it's already
    // an i8, and it's awkward to track those cases down.
    if (Value->getType()->isIntegerTy(1))
      return Builder.CreateZExt(Value, Builder.getInt8Ty(), "frombool");
    assert(Value->getType()->isIntegerTy(8) && "value rep of bool not i1/i8");
  }

  return Value;
}

llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
  if (Ty->isBooleanType() || isBooleanUnderlyingType(Ty)) {
    assert(Value->getType()->isIntegerTy(8) && "memory rep of bool not i8");
    return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
  }

  return Value;
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
                                        bool Volatile, unsigned Alignment,
                                        QualType Ty,
                                        llvm::MDNode *TBAAInfo,
                                        bool isInit) {
  Value = EmitToMemory(Value, Ty);

  llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
  if (Alignment)
    Store->setAlignment(Alignment);
  if (TBAAInfo)
    CGM.DecorateInstruction(Store, TBAAInfo);
  if (!isInit && Ty->isAtomicType())
    Store->setAtomic(llvm::SequentiallyConsistent);
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
                                        bool isInit) {
  EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
                    lvalue.getAlignment().getQuantity(), lvalue.getType(),
                    lvalue.getTBAAInfo(), isInit);
}

/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV) {
  if (LV.isObjCWeak()) {
    // load of a __weak object.
    llvm::Value *AddrWeakObj = LV.getAddress();
    return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                             AddrWeakObj));
  }
  if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak)
    return RValue::get(EmitARCLoadWeak(LV.getAddress()));

  if (LV.isSimple()) {
    assert(!LV.getType()->isFunctionType());

    // Everything needs a load.
    return RValue::get(EmitLoadOfScalar(LV));
  }

  if (LV.isVectorElt()) {
    llvm::Value *Vec = Builder.CreateLoad(LV.getVectorAddr(),
                                          LV.isVolatileQualified());
    return RValue::get(Builder.CreateExtractElement(Vec, LV.getVectorIdx(),
                                                    "vecext"));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
  if (LV.isExtVectorElt())
    return EmitLoadOfExtVectorElementLValue(LV);

  assert(LV.isBitField() && "Unknown LValue type!");
  return EmitLoadOfBitfieldLValue(LV);
}

RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
  const CGBitFieldInfo &Info = LV.getBitFieldInfo();
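
  // In outline: for each access component of the bit-field, load the
  // underlying storage unit, shift the field's bits down to bit zero, mask
  // off bits belonging to other fields, resize to the result type, shift
  // into the field's position in the result, and OR it into the running
  // result.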

  // Get the output type.
  llvm::Type *ResLTy = ConvertType(LV.getType());
  unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);

  // Compute the result as an OR of all of the individual component accesses.
  llvm::Value *Res = 0;
  for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
    const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

    // Get the field pointer.
    llvm::Value *Ptr = LV.getBitFieldBaseAddr();

    // Only offset by the field index if used, so that incoming values are not
    // required to be structures.
    if (AI.FieldIndex)
      Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");

    // Offset by the byte offset, if used.
    if (!AI.FieldByteOffset.isZero()) {
      Ptr = EmitCastToVoidPtr(Ptr);
      Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
                                       "bf.field.offs");
    }

    // Cast to the access type.
    llvm::Type *PTy = llvm::Type::getIntNPtrTy(getLLVMContext(),
                                               AI.AccessWidth,
                            CGM.getContext().getTargetAddressSpace(LV.getType()));
    Ptr = Builder.CreateBitCast(Ptr, PTy);

    // Perform the load.
    llvm::LoadInst *Load = Builder.CreateLoad(Ptr, LV.isVolatileQualified());
    if (!AI.AccessAlignment.isZero())
      Load->setAlignment(AI.AccessAlignment.getQuantity());

    // Shift out unused low bits and mask out unused high bits.
    llvm::Value *Val = Load;
    if (AI.FieldBitStart)
      Val = Builder.CreateLShr(Load, AI.FieldBitStart);
    Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(AI.AccessWidth,
                                                            AI.TargetBitWidth),
                            "bf.clear");

    // Extend or truncate to the target size.
    if (AI.AccessWidth < ResSizeInBits)
      Val = Builder.CreateZExt(Val, ResLTy);
    else if (AI.AccessWidth > ResSizeInBits)
      Val = Builder.CreateTrunc(Val, ResLTy);

    // Shift into place, and OR into the result.
    if (AI.TargetBitOffset)
      Val = Builder.CreateShl(Val, AI.TargetBitOffset);
    Res = Res ? Builder.CreateOr(Res, Val) : Val;
  }

  // If the bit-field is signed, perform the sign-extension.
  //
  // FIXME: This can easily be folded into the load of the high bits, which
  // could also eliminate the mask of high bits in some situations.
  if (Info.isSigned()) {
    unsigned ExtraBits = ResSizeInBits - Info.getSize();
    if (ExtraBits)
      Res = Builder.CreateAShr(Builder.CreateShl(Res, ExtraBits),
                               ExtraBits, "bf.val.sext");
  }

  return RValue::get(Res);
}

// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
  llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddr(),
                                        LV.isVolatileQualified());

  const llvm::Constant *Elts = LV.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be extracting
  // a single element.  Just codegen as an extractelement.
  const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
  if (!ExprVT) {
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
    return RValue::get(Builder.CreateExtractElement(Vec, Elt));
  }

  // Always use shuffle vector to try to retain the original program structure
  unsigned NumResultElts = ExprVT->getNumElements();

  SmallVector<llvm::Constant*, 4> Mask;
  for (unsigned i = 0; i != NumResultElts; ++i) {
    unsigned InIdx = getAccessedFieldNo(i, Elts);
    Mask.push_back(llvm::ConstantInt::get(Int32Ty, InIdx));
  }

  llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
  Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
                                    MaskV);
  return RValue::get(Vec);
}

/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit) {
  if (!Dst.isSimple()) {
    if (Dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element.
      llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddr(),
                                            Dst.isVolatileQualified());
      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
                                        Dst.getVectorIdx(), "vecins");
      Builder.CreateStore(Vec, Dst.getVectorAddr(), Dst.isVolatileQualified());
      return;
    }

    // If this is an update of extended vector elements, insert them as
    // appropriate.
    if (Dst.isExtVectorElt())
      return EmitStoreThroughExtVectorComponentLValue(Src, Dst);

    assert(Dst.isBitField() && "Unknown LValue type");
    return EmitStoreThroughBitfieldLValue(Src, Dst);
  }

  // There's special magic for assigning into an ARC-qualified l-value.
  if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
      llvm_unreachable("present but none");

    case Qualifiers::OCL_ExplicitNone:
      // nothing special
      break;

    case Qualifiers::OCL_Strong:
      EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Weak:
      EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Autoreleasing:
      Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
                                                     Src.getScalarVal()));
      // fall into the normal path
      break;
    }
  }

  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // Store into a __weak object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
    return;
  }

  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // Store into a __strong object.
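    // (For an ivar assignment, the Objective-C runtime entry point also needs
    // the byte offset of the ivar within the object; it is computed below as
    // the pointer difference between the ivar lvalue and the base expression.)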
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    if (Dst.isObjCIvar()) {
      assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
      llvm::Type *ResultType = ConvertType(getContext().LongTy);
      llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
      llvm::Value *dst = RHS;
      RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
      llvm::Value *LHS =
        Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
      llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
      CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
                                              BytesBetween);
    } else if (Dst.isGlobalObjCRef()) {
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
                                                Dst.isThreadLocalRef());
    } else
      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
    return;
  }

  assert(Src.isScalar() && "Can't emit an agg store with this method");
  EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
}

void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                                     llvm::Value **Result) {
  const CGBitFieldInfo &Info = Dst.getBitFieldInfo();

  // Get the output type.
  llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
  unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);

  // Get the source value, truncated to the width of the bit-field.
  llvm::Value *SrcVal = Src.getScalarVal();

  if (Dst.getType()->isBooleanType())
    SrcVal = Builder.CreateIntCast(SrcVal, ResLTy, /*IsSigned=*/false);

  SrcVal = Builder.CreateAnd(SrcVal, llvm::APInt::getLowBitsSet(ResSizeInBits,
                                                                Info.getSize()),
                             "bf.value");

  // Return the new value of the bit-field, if requested.
  if (Result) {
    // Cast back to the proper type for result.
    llvm::Type *SrcTy = Src.getScalarVal()->getType();
    llvm::Value *ReloadVal = Builder.CreateIntCast(SrcVal, SrcTy, false,
                                                   "bf.reload.val");

    // Sign extend if necessary.
    if (Info.isSigned()) {
      unsigned ExtraBits = ResSizeInBits - Info.getSize();
      if (ExtraBits)
        ReloadVal = Builder.CreateAShr(Builder.CreateShl(ReloadVal, ExtraBits),
                                       ExtraBits, "bf.reload.sext");
    }

    *Result = ReloadVal;
  }

  // Iterate over the components, writing each piece to memory.
  for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
    const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

    // Get the field pointer.
    llvm::Value *Ptr = Dst.getBitFieldBaseAddr();
    unsigned addressSpace =
      cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();

    // Only offset by the field index if used, so that incoming values are not
    // required to be structures.
    if (AI.FieldIndex)
      Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");

    // Offset by the byte offset, if used.
    if (!AI.FieldByteOffset.isZero()) {
      Ptr = EmitCastToVoidPtr(Ptr);
      Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
                                       "bf.field.offs");
    }

    // Cast to the access type.
    llvm::Type *AccessLTy =
      llvm::Type::getIntNTy(getLLVMContext(), AI.AccessWidth);

    llvm::Type *PTy = AccessLTy->getPointerTo(addressSpace);
    Ptr = Builder.CreateBitCast(Ptr, PTy);

    // Extract the piece of the bit-field value to write in this access,
    // limited to the values that are part of this access.
    llvm::Value *Val = SrcVal;
    if (AI.TargetBitOffset)
      Val = Builder.CreateLShr(Val, AI.TargetBitOffset);
    Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(ResSizeInBits,
                                                            AI.TargetBitWidth));

    // Extend or truncate to the access size.
    if (ResSizeInBits < AI.AccessWidth)
      Val = Builder.CreateZExt(Val, AccessLTy);
    else if (ResSizeInBits > AI.AccessWidth)
      Val = Builder.CreateTrunc(Val, AccessLTy);

    // Shift into the position in memory.
    if (AI.FieldBitStart)
      Val = Builder.CreateShl(Val, AI.FieldBitStart);

    // If necessary, load and OR in bits that are outside of the bit-field.
    if (AI.TargetBitWidth != AI.AccessWidth) {
      llvm::LoadInst *Load = Builder.CreateLoad(Ptr, Dst.isVolatileQualified());
      if (!AI.AccessAlignment.isZero())
        Load->setAlignment(AI.AccessAlignment.getQuantity());

      // Compute the mask for zeroing the bits that are part of the bit-field.
      llvm::APInt InvMask =
        ~llvm::APInt::getBitsSet(AI.AccessWidth, AI.FieldBitStart,
                                 AI.FieldBitStart + AI.TargetBitWidth);

      // Apply the mask and OR in to the value to write.
      Val = Builder.CreateOr(Builder.CreateAnd(Load, InvMask), Val);
    }

    // Write the value.
    llvm::StoreInst *Store = Builder.CreateStore(Val, Ptr,
                                                 Dst.isVolatileQualified());
    if (!AI.AccessAlignment.isZero())
      Store->setAlignment(AI.AccessAlignment.getQuantity());
  }
}

void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
                                                               LValue Dst) {
  // This access turns into a read/modify/write of the vector.  Load the input
  // value now.
  llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddr(),
                                        Dst.isVolatileQualified());
  const llvm::Constant *Elts = Dst.getExtVectorElts();

  llvm::Value *SrcVal = Src.getScalarVal();

  if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
    unsigned NumSrcElts = VTy->getNumElements();
    unsigned NumDstElts =
      cast<llvm::VectorType>(Vec->getType())->getNumElements();
    if (NumDstElts == NumSrcElts) {
      // Use shuffle vector if the src and destination have the same number of
      // elements, and restore the vector mask since it is on the side it will
      // be stored.
      SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
      for (unsigned i = 0; i != NumSrcElts; ++i) {
        unsigned InIdx = getAccessedFieldNo(i, Elts);
        Mask[InIdx] = llvm::ConstantInt::get(Int32Ty, i);
      }

      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      Vec = Builder.CreateShuffleVector(SrcVal,
                                        llvm::UndefValue::get(Vec->getType()),
                                        MaskV);
    } else if (NumDstElts > NumSrcElts) {
      // Extend the source vector to the same length and then shuffle it
      // into the destination.
      // FIXME: since we're shuffling with undef, can we just use the indices
      // into that?  This could be simpler.
      SmallVector<llvm::Constant*, 4> ExtMask;
      unsigned i;
      for (i = 0; i != NumSrcElts; ++i)
        ExtMask.push_back(llvm::ConstantInt::get(Int32Ty, i));
      for (; i != NumDstElts; ++i)
        ExtMask.push_back(llvm::UndefValue::get(Int32Ty));
      llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask);
      llvm::Value *ExtSrcVal =
        Builder.CreateShuffleVector(SrcVal,
                                    llvm::UndefValue::get(SrcVal->getType()),
                                    ExtMaskV);
      // build identity mask
      SmallVector<llvm::Constant*, 4> Mask;
      for (unsigned i = 0; i != NumDstElts; ++i)
        Mask.push_back(llvm::ConstantInt::get(Int32Ty, i));

      // modify the mask where source elements get shuffled in
      for (unsigned i = 0; i != NumSrcElts; ++i) {
        unsigned Idx = getAccessedFieldNo(i, Elts);
        Mask[Idx] = llvm::ConstantInt::get(Int32Ty, i+NumDstElts);
      }
      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV);
    } else {
      // We should never shorten the vector
      llvm_unreachable("unexpected shorten vector length");
    }
  } else {
    // If the Src is a scalar (not a vector) it must be updating one element.
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
    Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
  }

  Builder.CreateStore(Vec, Dst.getExtVectorAddr(), Dst.isVolatileQualified());
}

// setObjCGCLValueClass - sets the class of the lvalue for the purpose of
// generating write-barrier API calls. It is currently a global, ivar,
// or neither.
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
                                 LValue &LV,
                                 bool IsMemberAccess=false) {
  if (Ctx.getLangOptions().getGC() == LangOptions::NonGC)
    return;

  if (isa<ObjCIvarRefExpr>(E)) {
    QualType ExpTy = E->getType();
    if (IsMemberAccess && ExpTy->isPointerType()) {
      // If ivar is a structure pointer, assigning to field of
      // this struct follows gcc's behavior and makes it a non-ivar
      // write-barrier conservatively.
      ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType()) {
        LV.setObjCIvar(false);
        return;
      }
    }
    LV.setObjCIvar(true);
    ObjCIvarRefExpr *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr*>(E));
    LV.setBaseIvarExp(Exp->getBase());
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
      if (VD->hasGlobalStorage()) {
        LV.setGlobalObjCRef(true);
        LV.setThreadLocalRef(VD->isThreadSpecified());
      }
    }
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    if (LV.isObjCIvar()) {
      // If cast is to a structure pointer, follow gcc's behavior and make it
      // a non-ivar write-barrier.
      QualType ExpTy = E->getType();
      if (ExpTy->isPointerType())
        ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType())
        LV.setObjCIvar(false);
    }
    return;
  }

  if (const GenericSelectionExpr *Exp = dyn_cast<GenericSelectionExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
    return;
  }

  if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const ObjCBridgedCastExpr *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
    if (LV.isObjCIvar() && !LV.isObjCArray())
      // Using array syntax to assign to what an ivar points to is not
      // the same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
      LV.setObjCIvar(false);
    else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
      // Using array syntax to assign to what a global points to is not
      // the same as assigning to the global itself. {id *G;} G[i] = 0;
      LV.setGlobalObjCRef(false);
    return;
  }

  if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
    // We don't know if member is an 'ivar', but this flag is looked at
    // only in the context of LV.isObjCIvar().
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }
}

static llvm::Value *
EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
                                llvm::Value *V, llvm::Type *IRType,
                                StringRef Name = StringRef()) {
  unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
  return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
}

static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
                                      const Expr *E, const VarDecl *VD) {
  assert((VD->hasExternalStorage() || VD->isFileVarDecl()) &&
         "Var decl must have external storage or be a file var decl!");

  llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
  llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
  V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
  CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
  QualType T = E->getType();
  LValue LV;
  if (VD->getType()->isReferenceType()) {
    llvm::LoadInst *LI = CGF.Builder.CreateLoad(V);
    LI->setAlignment(Alignment.getQuantity());
    V = LI;
    LV = CGF.MakeNaturalAlignAddrLValue(V, T);
  } else {
    LV = CGF.MakeAddrLValue(V, E->getType(), Alignment);
  }
  setObjCGCLValueClass(CGF.getContext(), E, LV);
  return LV;
}

static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
                                     const Expr *E, const FunctionDecl *FD) {
  llvm::Value *V = CGF.CGM.GetAddrOfFunction(FD);
  if (!FD->hasPrototype()) {
    if (const FunctionProtoType *Proto =
            FD->getType()->getAs<FunctionProtoType>()) {
      // Ugly case: for a K&R-style definition, the type of the definition
      // isn't the same as the type of a use.  Correct for this with a
      // bitcast.
      QualType NoProtoType =
        CGF.getContext().getFunctionNoProtoType(Proto->getResultType());
      NoProtoType = CGF.getContext().getPointerType(NoProtoType);
      V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType));
    }
  }
  CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
  return CGF.MakeAddrLValue(V, E->getType(), Alignment);
}

LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
  const NamedDecl *ND = E->getDecl();
  CharUnits Alignment = getContext().getDeclAlign(ND);
  QualType T = E->getType();

  // FIXME: We should be able to assert this for FunctionDecls as well!
  // FIXME: We should be able to assert this for all DeclRefExprs, not just
  // those with a valid source location.
  assert((ND->isUsed(false) || !isa<VarDecl>(ND) ||
          !E->getLocation().isValid()) &&
         "Should not use decl without marking it used!");

  if (ND->hasAttr<WeakRefAttr>()) {
    const ValueDecl *VD = cast<ValueDecl>(ND);
    llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD);
    return MakeAddrLValue(Aliasee, E->getType(), Alignment);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {

    // Check if this is a global variable.
    if (VD->hasExternalStorage() || VD->isFileVarDecl())
      return EmitGlobalVarDeclLValue(*this, E, VD);

    bool NonGCable = VD->hasLocalStorage() &&
                     !VD->getType()->isReferenceType() &&
                     !VD->hasAttr<BlocksAttr>();

    llvm::Value *V = LocalDeclMap[VD];
    if (!V && VD->isStaticLocal())
      V = CGM.getStaticLocalDeclAddress(VD);
    assert(V && "DeclRefExpr not entered in LocalDeclMap?");

    if (VD->hasAttr<BlocksAttr>())
      V = BuildBlockByrefAddress(V, VD);

    LValue LV;
    if (VD->getType()->isReferenceType()) {
      llvm::LoadInst *LI = Builder.CreateLoad(V);
      LI->setAlignment(Alignment.getQuantity());
      V = LI;
      LV = MakeNaturalAlignAddrLValue(V, T);
    } else {
      LV = MakeAddrLValue(V, T, Alignment);
    }

    if (NonGCable) {
      LV.getQuals().removeObjCGCAttr();
      LV.setNonGC(true);
    }
    setObjCGCLValueClass(getContext(), E, LV);
    return LV;
  }

  if (const FunctionDecl *fn = dyn_cast<FunctionDecl>(ND))
    return EmitFunctionDeclLValue(*this, E, fn);

  llvm_unreachable("Unhandled DeclRefExpr");
}

LValue CodeGenFunction::EmitBlockDeclRefLValue(const BlockDeclRefExpr *E) {
  CharUnits Alignment = getContext().getDeclAlign(E->getDecl());
  return MakeAddrLValue(GetAddrOfBlockDecl(E), E->getType(), Alignment);
}

LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
  // __extension__ doesn't affect lvalue-ness.
  if (E->getOpcode() == UO_Extension)
    return EmitLValue(E->getSubExpr());

  QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
  switch (E->getOpcode()) {
  default: llvm_unreachable("Unknown unary operator lvalue!");
  case UO_Deref: {
    QualType T = E->getSubExpr()->getType()->getPointeeType();
    assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");

    LValue LV = MakeNaturalAlignAddrLValue(EmitScalarExpr(E->getSubExpr()), T);
    LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());

    // We should not generate __weak write barrier on indirect reference
    // of a pointer to object; as in void foo (__weak id *param); *param = 0;
    // But, we continue to generate __strong write barrier on indirect write
    // into a pointer to object.
    if (getContext().getLangOptions().ObjC1 &&
        getContext().getLangOptions().getGC() != LangOptions::NonGC &&
        LV.isObjCWeak())
      LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
    return LV;
  }
  case UO_Real:
  case UO_Imag: {
    LValue LV = EmitLValue(E->getSubExpr());
    assert(LV.isSimple() && "real/imag on non-ordinary l-value");
    llvm::Value *Addr = LV.getAddress();

    // real and imag are valid on scalars.  This is a faster way of
    // testing that.
    if (!cast<llvm::PointerType>(Addr->getType())
          ->getElementType()->isStructTy()) {
      assert(E->getSubExpr()->getType()->isArithmeticType());
      return LV;
    }

    assert(E->getSubExpr()->getType()->isAnyComplexType());

    unsigned Idx = E->getOpcode() == UO_Imag;
    return MakeAddrLValue(Builder.CreateStructGEP(LV.getAddress(),
                                                  Idx, "idx"),
                          ExprTy);
  }
  case UO_PreInc:
  case UO_PreDec: {
    LValue LV = EmitLValue(E->getSubExpr());
    bool isInc = E->getOpcode() == UO_PreInc;

    if (E->getType()->isAnyComplexType())
      EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
    else
      EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
    return LV;
  }
  }
}

LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
                        E->getType());
}

LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
                        E->getType());
}

LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
  switch (E->getIdentType()) {
  default:
    return EmitUnsupportedLValue(E, "predefined expression");

  case PredefinedExpr::Func:
  case PredefinedExpr::Function:
  case PredefinedExpr::PrettyFunction: {
    unsigned Type = E->getIdentType();
    std::string GlobalVarName;

    switch (Type) {
    default: llvm_unreachable("Invalid type");
    case PredefinedExpr::Func:
      GlobalVarName = "__func__.";
      break;
    case PredefinedExpr::Function:
      GlobalVarName = "__FUNCTION__.";
      break;
    case PredefinedExpr::PrettyFunction:
      GlobalVarName = "__PRETTY_FUNCTION__.";
      break;
    }

    StringRef FnName = CurFn->getName();
    if (FnName.startswith("\01"))
      FnName = FnName.substr(1);
    GlobalVarName += FnName;

    const Decl *CurDecl = CurCodeDecl;
    if (CurDecl == 0)
      CurDecl = getContext().getTranslationUnitDecl();

    std::string FunctionName =
      (isa<BlockDecl>(CurDecl)
FnName.str() 1551 : PredefinedExpr::ComputeName((PredefinedExpr::IdentType)Type, CurDecl)); 1552 1553 llvm::Constant *C = 1554 CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str()); 1555 return MakeAddrLValue(C, E->getType()); 1556 } 1557 } 1558} 1559 1560llvm::BasicBlock *CodeGenFunction::getTrapBB() { 1561 const CodeGenOptions &GCO = CGM.getCodeGenOpts(); 1562 1563 // If we are not optimizing, don't collapse all calls to trap in the function 1564 // into the same call; that way, in the debugger, one can see which operation 1565 // did in fact fail. If we are optimizing, we collapse all calls to trap down 1566 // to just one per function to save on code size. 1567 if (GCO.OptimizationLevel && TrapBB) 1568 return TrapBB; 1569 1570 llvm::BasicBlock *Cont = 0; 1571 if (HaveInsertPoint()) { 1572 Cont = createBasicBlock("cont"); 1573 EmitBranch(Cont); 1574 } 1575 TrapBB = createBasicBlock("trap"); 1576 EmitBlock(TrapBB); 1577 1578 llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap); 1579 llvm::CallInst *TrapCall = Builder.CreateCall(F); 1580 TrapCall->setDoesNotReturn(); 1581 TrapCall->setDoesNotThrow(); 1582 Builder.CreateUnreachable(); 1583 1584 if (Cont) 1585 EmitBlock(Cont); 1586 return TrapBB; 1587} 1588 1589/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an 1590/// array to pointer, return the array subexpression. 1591static const Expr *isSimpleArrayDecayOperand(const Expr *E) { 1592 // If this isn't just an array->pointer decay, bail out. 1593 const CastExpr *CE = dyn_cast<CastExpr>(E); 1594 if (CE == 0 || CE->getCastKind() != CK_ArrayToPointerDecay) 1595 return 0; 1596 1597 // If this is a decay from a variable-length array, bail out. 1598 const Expr *SubExpr = CE->getSubExpr(); 1599 if (SubExpr->getType()->isVariableArrayType()) 1600 return 0; 1601 1602 return SubExpr; 1603} 1604 1605LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) { 1606 // The index must always be an integer, which is not an aggregate. Emit it. 1607 llvm::Value *Idx = EmitScalarExpr(E->getIdx()); 1608 QualType IdxTy = E->getIdx()->getType(); 1609 bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType(); 1610 1611 // If the base is a vector type, then we are forming a vector element lvalue 1612 // with this subscript. 1613 if (E->getBase()->getType()->isVectorType()) { 1614 // Emit the vector as an lvalue to get its address. 1615 LValue LHS = EmitLValue(E->getBase()); 1616 assert(LHS.isSimple() && "Can only subscript lvalue vectors here!"); 1617 Idx = Builder.CreateIntCast(Idx, Int32Ty, IdxSigned, "vidx"); 1618 return LValue::MakeVectorElt(LHS.getAddress(), Idx, 1619 E->getBase()->getType()); 1620 } 1621 1622 // Extend or truncate the index type to 32 or 64 bits. 1623 if (Idx->getType() != IntPtrTy) 1624 Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom"); 1625 1626 // FIXME: As llvm implements the object size checking, this can come out.
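// Roughly, the guard emitted below for a hypothetical 'int a[10]; a[i]'
// (names for illustration only) looks like:
//   %ok = icmp ule i64 %i, 10
//   br i1 %ok, label %cont, label %trap
// Note the comparison is 'ule' rather than 'ult', presumably so that merely
// forming the one-past-the-end address '&a[10]' does not trap.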
1627 if (CatchUndefined) { 1628 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E->getBase())) { 1629 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr())) { 1630 if (ICE->getCastKind() == CK_ArrayToPointerDecay) { 1631 if (const ConstantArrayType *CAT 1632 = getContext().getAsConstantArrayType(DRE->getType())) { 1633 llvm::APInt Size = CAT->getSize(); 1634 llvm::BasicBlock *Cont = createBasicBlock("cont"); 1635 Builder.CreateCondBr(Builder.CreateICmpULE(Idx, 1636 llvm::ConstantInt::get(Idx->getType(), Size)), 1637 Cont, getTrapBB()); 1638 EmitBlock(Cont); 1639 } 1640 } 1641 } 1642 } 1643 } 1644 1645 // We know that the pointer points to a type of the correct size, unless the 1646 // type is a VLA or an Objective-C interface. 1647 llvm::Value *Address = 0; 1648 CharUnits ArrayAlignment; 1649 if (const VariableArrayType *vla = 1650 getContext().getAsVariableArrayType(E->getType())) { 1651 // The base must be a pointer, which is not an aggregate. Emit 1652 // it. It needs to be emitted first in case it's what captures 1653 // the VLA bounds. 1654 Address = EmitScalarExpr(E->getBase()); 1655 1656 // The element count here is the total number of non-VLA elements. 1657 llvm::Value *numElements = getVLASize(vla).first; 1658 1659 // Effectively, the multiply by the VLA size is part of the GEP. 1660 // GEP indexes are signed, and scaling an index isn't permitted to 1661 // signed-overflow, so we use the same semantics for our explicit 1662 // multiply. We suppress this if overflow is not undefined behavior. 1663 if (getLangOptions().isSignedOverflowDefined()) { 1664 Idx = Builder.CreateMul(Idx, numElements); 1665 Address = Builder.CreateGEP(Address, Idx, "arrayidx"); 1666 } else { 1667 Idx = Builder.CreateNSWMul(Idx, numElements); 1668 Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx"); 1669 } 1670 } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()) { 1671 // Indexing over an interface, as in "NSString *P; P[4];" 1672 llvm::Value *InterfaceSize = 1673 llvm::ConstantInt::get(Idx->getType(), 1674 getContext().getTypeSizeInChars(OIT).getQuantity()); 1675 1676 Idx = Builder.CreateMul(Idx, InterfaceSize); 1677 1678 // The base must be a pointer, which is not an aggregate. Emit it. 1679 llvm::Value *Base = EmitScalarExpr(E->getBase()); 1680 Address = EmitCastToVoidPtr(Base); 1681 Address = Builder.CreateGEP(Address, Idx, "arrayidx"); 1682 Address = Builder.CreateBitCast(Address, Base->getType()); 1683 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) { 1684 // If this is A[i] where A is an array, the frontend will have decayed the 1685 // base to be an ArrayToPointerDecay implicit cast. While correct, it is 1686 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a 1687 // "gep x, i" here. Emit one "gep A, 0, i". 1688 assert(Array->getType()->isArrayType() && 1689 "Array to pointer decay must have array source type!"); 1690 LValue ArrayLV = EmitLValue(Array); 1691 llvm::Value *ArrayPtr = ArrayLV.getAddress(); 1692 llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0); 1693 llvm::Value *Args[] = { Zero, Idx }; 1694 1695 // Propagate the alignment from the array itself to the result.
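// (For illustration: with a packed aggregate such as
//    struct __attribute__((packed)) P { char c; int a[3]; } p;
// 'p.a' is only 1-byte aligned, so the lvalue for 'p.a[i]' ends up with
// alignment min(align(int), align(p.a)) = 1 rather than the natural 4.)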
1696 ArrayAlignment = ArrayLV.getAlignment(); 1697 1698 if (getContext().getLangOptions().isSignedOverflowDefined()) 1699 Address = Builder.CreateGEP(ArrayPtr, Args, "arrayidx"); 1700 else 1701 Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx"); 1702 } else { 1703 // The base must be a pointer, which is not an aggregate. Emit it. 1704 llvm::Value *Base = EmitScalarExpr(E->getBase()); 1705 if (getContext().getLangOptions().isSignedOverflowDefined()) 1706 Address = Builder.CreateGEP(Base, Idx, "arrayidx"); 1707 else 1708 Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx"); 1709 } 1710 1711 QualType T = E->getBase()->getType()->getPointeeType(); 1712 assert(!T.isNull() && 1713 "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type"); 1714 1715 1716 // Limit the alignment to that of the result type. 1717 LValue LV; 1718 if (!ArrayAlignment.isZero()) { 1719 CharUnits Align = getContext().getTypeAlignInChars(T); 1720 ArrayAlignment = std::min(Align, ArrayAlignment); 1721 LV = MakeAddrLValue(Address, T, ArrayAlignment); 1722 } else { 1723 LV = MakeNaturalAlignAddrLValue(Address, T); 1724 } 1725 1726 LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace()); 1727 1728 if (getContext().getLangOptions().ObjC1 && 1729 getContext().getLangOptions().getGC() != LangOptions::NonGC) { 1730 LV.setNonGC(!E->isOBJCGCCandidate(getContext())); 1731 setObjCGCLValueClass(getContext(), E, LV); 1732 } 1733 return LV; 1734} 1735 1736static 1737llvm::Constant *GenerateConstantVector(llvm::LLVMContext &VMContext, 1738 SmallVector<unsigned, 4> &Elts) { 1739 SmallVector<llvm::Constant*, 4> CElts; 1740 1741 llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext); 1742 for (unsigned i = 0, e = Elts.size(); i != e; ++i) 1743 CElts.push_back(llvm::ConstantInt::get(Int32Ty, Elts[i])); 1744 1745 return llvm::ConstantVector::get(CElts); 1746} 1747 1748LValue CodeGenFunction:: 1749EmitExtVectorElementExpr(const ExtVectorElementExpr *E) { 1750 // Emit the base vector as an l-value. 1751 LValue Base; 1752 1753 // ExtVectorElementExpr's base can either be a vector or a pointer to a vector. 1754 if (E->isArrow()) { 1755 // If it is a pointer to a vector, emit the address and form an lvalue with 1756 // it. 1757 llvm::Value *Ptr = EmitScalarExpr(E->getBase()); 1758 const PointerType *PT = E->getBase()->getType()->getAs<PointerType>(); 1759 Base = MakeAddrLValue(Ptr, PT->getPointeeType()); 1760 Base.getQuals().removeObjCGCAttr(); 1761 } else if (E->getBase()->isGLValue()) { 1762 // Otherwise, if the base is an lvalue (as in the case of foo.x.x), 1763 // emit the base as an lvalue. 1764 assert(E->getBase()->getType()->isVectorType()); 1765 Base = EmitLValue(E->getBase()); 1766 } else { 1767 // Otherwise, the base is a normal rvalue (as in (V+V).x); emit it as such. 1768 assert(E->getBase()->getType()->isVectorType() && 1769 "Result must be a vector"); 1770 llvm::Value *Vec = EmitScalarExpr(E->getBase()); 1771 1772 // Store the vector to memory (because LValue wants an address). 1773 llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType()); 1774 Builder.CreateStore(Vec, VecMem); 1775 Base = MakeAddrLValue(VecMem, E->getBase()->getType()); 1776 } 1777 1778 QualType type = 1779 E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers()); 1780 1781 // Encode the element access list into a vector of unsigned indices.
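// (For example, '.wzyx' on a float4 encodes as {3, 2, 1, 0}. When the base
// is itself an ext-vector element lvalue, as in 'v.zw.y', the two index
// lists are composed below: base elements {2, 3} indexed by {1} yield {3},
// i.e. v.w.)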
1782 SmallVector<unsigned, 4> Indices; 1783 E->getEncodedElementAccess(Indices); 1784 1785 if (Base.isSimple()) { 1786 llvm::Constant *CV = GenerateConstantVector(getLLVMContext(), Indices); 1787 return LValue::MakeExtVectorElt(Base.getAddress(), CV, type); 1788 } 1789 assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!"); 1790 1791 llvm::Constant *BaseElts = Base.getExtVectorElts(); 1792 SmallVector<llvm::Constant *, 4> CElts; 1793 1794 for (unsigned i = 0, e = Indices.size(); i != e; ++i) { 1795 if (isa<llvm::ConstantAggregateZero>(BaseElts)) 1796 CElts.push_back(llvm::ConstantInt::get(Int32Ty, 0)); 1797 else 1798 CElts.push_back(cast<llvm::Constant>(BaseElts->getOperand(Indices[i]))); 1799 } 1800 llvm::Constant *CV = llvm::ConstantVector::get(CElts); 1801 return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, type); 1802} 1803 1804LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) { 1805 bool isNonGC = false; 1806 Expr *BaseExpr = E->getBase(); 1807 llvm::Value *BaseValue = NULL; 1808 Qualifiers BaseQuals; 1809 1810 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar. 1811 if (E->isArrow()) { 1812 BaseValue = EmitScalarExpr(BaseExpr); 1813 const PointerType *PTy = 1814 BaseExpr->getType()->getAs<PointerType>(); 1815 BaseQuals = PTy->getPointeeType().getQualifiers(); 1816 } else { 1817 LValue BaseLV = EmitLValue(BaseExpr); 1818 if (BaseLV.isNonGC()) 1819 isNonGC = true; 1820 // FIXME: this isn't right for bitfields. 1821 BaseValue = BaseLV.getAddress(); 1822 QualType BaseTy = BaseExpr->getType(); 1823 BaseQuals = BaseTy.getQualifiers(); 1824 } 1825 1826 NamedDecl *ND = E->getMemberDecl(); 1827 if (FieldDecl *Field = dyn_cast<FieldDecl>(ND)) { 1828 LValue LV = EmitLValueForField(BaseValue, Field, 1829 BaseQuals.getCVRQualifiers()); 1830 LV.setNonGC(isNonGC); 1831 setObjCGCLValueClass(getContext(), E, LV); 1832 return LV; 1833 } 1834 1835 if (VarDecl *VD = dyn_cast<VarDecl>(ND)) 1836 return EmitGlobalVarDeclLValue(*this, E, VD); 1837 1838 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) 1839 return EmitFunctionDeclLValue(*this, E, FD); 1840 1841 llvm_unreachable("Unhandled member declaration!"); 1842} 1843 1844LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value *BaseValue, 1845 const FieldDecl *Field, 1846 unsigned CVRQualifiers) { 1847 const CGRecordLayout &RL = 1848 CGM.getTypes().getCGRecordLayout(Field->getParent()); 1849 const CGBitFieldInfo &Info = RL.getBitFieldInfo(Field); 1850 return LValue::MakeBitfield(BaseValue, Info, 1851 Field->getType().withCVRQualifiers(CVRQualifiers)); 1852} 1853 1854/// EmitLValueForAnonRecordField - Given that the field is a member of 1855/// an anonymous struct or union buried inside a record, and given 1856/// that the base value is a pointer to the enclosing record, derive 1857/// an lvalue for the ultimate field. 
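/// For example (illustrative), given
///   struct S { struct { int x; }; };
/// the IndirectFieldDecl for 'x' chains through the unnamed anonymous
/// member, and the loop below applies EmitLValueForField once per link in
/// that chain.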
1858LValue CodeGenFunction::EmitLValueForAnonRecordField(llvm::Value *BaseValue, 1859 const IndirectFieldDecl *Field, 1860 unsigned CVRQualifiers) { 1861 IndirectFieldDecl::chain_iterator I = Field->chain_begin(), 1862 IEnd = Field->chain_end(); 1863 while (true) { 1864 LValue LV = EmitLValueForField(BaseValue, cast<FieldDecl>(*I), 1865 CVRQualifiers); 1866 if (++I == IEnd) return LV; 1867 1868 assert(LV.isSimple()); 1869 BaseValue = LV.getAddress(); 1870 CVRQualifiers |= LV.getVRQualifiers(); 1871 } 1872} 1873 1874LValue CodeGenFunction::EmitLValueForField(llvm::Value *baseAddr, 1875 const FieldDecl *field, 1876 unsigned cvr) { 1877 if (field->isBitField()) 1878 return EmitLValueForBitfield(baseAddr, field, cvr); 1879 1880 const RecordDecl *rec = field->getParent(); 1881 QualType type = field->getType(); 1882 CharUnits alignment = getContext().getDeclAlign(field); 1883 1884 bool mayAlias = rec->hasAttr<MayAliasAttr>(); 1885 1886 llvm::Value *addr = baseAddr; 1887 if (rec->isUnion()) { 1888 // For unions, there is no pointer adjustment. 1889 assert(!type->isReferenceType() && "union has reference member"); 1890 } else { 1891 // For structs, we GEP to the field that the record layout suggests. 1892 unsigned idx = CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field); 1893 addr = Builder.CreateStructGEP(addr, idx, field->getName()); 1894 1895 // If this is a reference field, load the reference right now. 1896 if (const ReferenceType *refType = type->getAs<ReferenceType>()) { 1897 llvm::LoadInst *load = Builder.CreateLoad(addr, "ref"); 1898 if (cvr & Qualifiers::Volatile) load->setVolatile(true); 1899 load->setAlignment(alignment.getQuantity()); 1900 1901 if (CGM.shouldUseTBAA()) { 1902 llvm::MDNode *tbaa; 1903 if (mayAlias) 1904 tbaa = CGM.getTBAAInfo(getContext().CharTy); 1905 else 1906 tbaa = CGM.getTBAAInfo(type); 1907 CGM.DecorateInstruction(load, tbaa); 1908 } 1909 1910 addr = load; 1911 mayAlias = false; 1912 type = refType->getPointeeType(); 1913 if (type->isIncompleteType()) 1914 alignment = CharUnits(); 1915 else 1916 alignment = getContext().getTypeAlignInChars(type); 1917 cvr = 0; // qualifiers don't recursively apply to referencee 1918 } 1919 } 1920 1921 // Make sure that the address is pointing to the right type. This is critical 1922 // for both unions and structs. A union needs a bitcast, a struct element 1923 // will need a bitcast if the LLVM type laid out doesn't match the desired 1924 // type. 1925 addr = EmitBitCastOfLValueToProperType(*this, addr, 1926 CGM.getTypes().ConvertTypeForMem(type), 1927 field->getName()); 1928 1929 if (field->hasAttr<AnnotateAttr>()) 1930 addr = EmitFieldAnnotations(field, addr); 1931 1932 LValue LV = MakeAddrLValue(addr, type, alignment); 1933 LV.getQuals().addCVRQualifiers(cvr); 1934 1935 // __weak attribute on a field is ignored. 1936 if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak) 1937 LV.getQuals().removeObjCGCAttr(); 1938 1939 // Fields of may_alias structs act like 'char' for TBAA purposes. 1940 // FIXME: this should get propagated down through anonymous structs 1941 // and unions. 
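// (For illustration: given 'struct __attribute__((may_alias)) M { int i; } m;',
// an access to 'm.i' is tagged with the TBAA node for 'char' and may
// therefore alias accesses of any other type.)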
1942 if (mayAlias && LV.getTBAAInfo()) 1943 LV.setTBAAInfo(CGM.getTBAAInfo(getContext().CharTy)); 1944 1945 return LV; 1946} 1947 1948LValue 1949CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value *BaseValue, 1950 const FieldDecl *Field, 1951 unsigned CVRQualifiers) { 1952 QualType FieldType = Field->getType(); 1953 1954 if (!FieldType->isReferenceType()) 1955 return EmitLValueForField(BaseValue, Field, CVRQualifiers); 1956 1957 const CGRecordLayout &RL = 1958 CGM.getTypes().getCGRecordLayout(Field->getParent()); 1959 unsigned idx = RL.getLLVMFieldNo(Field); 1960 llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx); 1961 assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs"); 1962 1963 1964 // Make sure that the address is pointing to the right type. This is critical 1965 // for both unions and structs. A union needs a bitcast, a struct element 1966 // will need a bitcast if the LLVM type laid out doesn't match the desired 1967 // type. 1968 llvm::Type *llvmType = ConvertTypeForMem(FieldType); 1969 unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace(); 1970 V = Builder.CreateBitCast(V, llvmType->getPointerTo(AS)); 1971 1972 CharUnits Alignment = getContext().getDeclAlign(Field); 1973 return MakeAddrLValue(V, FieldType, Alignment); 1974} 1975 1976LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){ 1977 if (E->isFileScope()) { 1978 llvm::Value *GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E); 1979 return MakeAddrLValue(GlobalPtr, E->getType()); 1980 } 1981 1982 llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral"); 1983 const Expr *InitExpr = E->getInitializer(); 1984 LValue Result = MakeAddrLValue(DeclPtr, E->getType()); 1985 1986 EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(), 1987 /*Init*/ true); 1988 1989 return Result; 1990} 1991 1992LValue CodeGenFunction:: 1993EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) { 1994 if (!expr->isGLValue()) { 1995 // ?: here should be an aggregate. 1996 assert((hasAggregateLLVMType(expr->getType()) && 1997 !expr->getType()->isAnyComplexType()) && 1998 "Unexpected conditional operator!"); 1999 return EmitAggExprToLValue(expr); 2000 } 2001 2002 const Expr *condExpr = expr->getCond(); 2003 bool CondExprBool; 2004 if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) { 2005 const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr(); 2006 if (!CondExprBool) std::swap(live, dead); 2007 2008 if (!ContainsLabel(dead)) 2009 return EmitLValue(live); 2010 } 2011 2012 OpaqueValueMapping binding(*this, expr); 2013 2014 llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true"); 2015 llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false"); 2016 llvm::BasicBlock *contBlock = createBasicBlock("cond.end"); 2017 2018 ConditionalEvaluation eval(*this); 2019 EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock); 2020 2021 // Any temporaries created here are conditional. 2022 EmitBlock(lhsBlock); 2023 eval.begin(*this); 2024 LValue lhs = EmitLValue(expr->getTrueExpr()); 2025 eval.end(*this); 2026 2027 if (!lhs.isSimple()) 2028 return EmitUnsupportedLValue(expr, "conditional operator"); 2029 2030 lhsBlock = Builder.GetInsertBlock(); 2031 Builder.CreateBr(contBlock); 2032 2033 // Any temporaries created here are conditional. 
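// (For illustration: for the glvalue conditional '(b ? x : y) = 0', the PHI
// built at the merge point below joins &x and &y, and the store then goes
// through whichever address the condition selected.)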
2034 EmitBlock(rhsBlock); 2035 eval.begin(*this); 2036 LValue rhs = EmitLValue(expr->getFalseExpr()); 2037 eval.end(*this); 2038 if (!rhs.isSimple()) 2039 return EmitUnsupportedLValue(expr, "conditional operator"); 2040 rhsBlock = Builder.GetInsertBlock(); 2041 2042 EmitBlock(contBlock); 2043 2044 llvm::PHINode *phi = Builder.CreatePHI(lhs.getAddress()->getType(), 2, 2045 "cond-lvalue"); 2046 phi->addIncoming(lhs.getAddress(), lhsBlock); 2047 phi->addIncoming(rhs.getAddress(), rhsBlock); 2048 return MakeAddrLValue(phi, expr->getType()); 2049} 2050 2051/// EmitCastLValue - Casts are never lvalues unless the cast is a dynamic_cast. 2052 /// For a dynamic_cast we can produce the usual lvalue result; otherwise, if a 2053 /// cast is needed by the code generator in an lvalue context, it must mean 2054 /// that we need the address of an aggregate in order to access one of its 2055 /// fields. This can happen for all the reasons that casts are permitted with 2056 /// an aggregate result, including noop aggregate casts and casts from scalar 2057 /// to union. 2058 LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { 2059 switch (E->getCastKind()) { 2060 case CK_ToVoid: 2061 return EmitUnsupportedLValue(E, "unexpected cast lvalue"); 2062 2063 case CK_Dependent: 2064 llvm_unreachable("dependent cast kind in IR gen!"); 2065 2066 // These two casts are currently treated as no-ops, although they could 2067 // potentially be real operations depending on the target's ABI. 2068 case CK_NonAtomicToAtomic: 2069 case CK_AtomicToNonAtomic: 2070 2071 case CK_NoOp: 2072 case CK_LValueToRValue: 2073 if (!E->getSubExpr()->Classify(getContext()).isPRValue() 2074 || E->getType()->isRecordType()) 2075 return EmitLValue(E->getSubExpr()); 2076 // Fall through to synthesize a temporary. 2077 2078 case CK_BitCast: 2079 case CK_ArrayToPointerDecay: 2080 case CK_FunctionToPointerDecay: 2081 case CK_NullToMemberPointer: 2082 case CK_NullToPointer: 2083 case CK_IntegralToPointer: 2084 case CK_PointerToIntegral: 2085 case CK_PointerToBoolean: 2086 case CK_VectorSplat: 2087 case CK_IntegralCast: 2088 case CK_IntegralToBoolean: 2089 case CK_IntegralToFloating: 2090 case CK_FloatingToIntegral: 2091 case CK_FloatingToBoolean: 2092 case CK_FloatingCast: 2093 case CK_FloatingRealToComplex: 2094 case CK_FloatingComplexToReal: 2095 case CK_FloatingComplexToBoolean: 2096 case CK_FloatingComplexCast: 2097 case CK_FloatingComplexToIntegralComplex: 2098 case CK_IntegralRealToComplex: 2099 case CK_IntegralComplexToReal: 2100 case CK_IntegralComplexToBoolean: 2101 case CK_IntegralComplexCast: 2102 case CK_IntegralComplexToFloatingComplex: 2103 case CK_DerivedToBaseMemberPointer: 2104 case CK_BaseToDerivedMemberPointer: 2105 case CK_MemberPointerToBoolean: 2106 case CK_AnyPointerToBlockPointerCast: 2107 case CK_ARCProduceObject: 2108 case CK_ARCConsumeObject: 2109 case CK_ARCReclaimReturnedObject: 2110 case CK_ARCExtendBlockObject: { 2111 // These casts only produce lvalues when we're binding a reference to a 2112 // temporary realized from a (converted) pure rvalue. Emit the expression 2113 // as a value, copy it into a temporary, and return an lvalue referring to 2114 // that temporary.
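// (For illustration: if an lvalue is needed for the 'double' produced by a
// CK_IntegralToFloating cast of an 'int' rvalue, the converted value is
// stored into the 'ref.temp' alloca created here and that alloca's address
// becomes the result.)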
2115 llvm::Value *V = CreateMemTemp(E->getType(), "ref.temp"); 2116 EmitAnyExprToMem(E, V, E->getType().getQualifiers(), false); 2117 return MakeAddrLValue(V, E->getType()); 2118 } 2119 2120 case CK_Dynamic: { 2121 LValue LV = EmitLValue(E->getSubExpr()); 2122 llvm::Value *V = LV.getAddress(); 2123 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(E); 2124 return MakeAddrLValue(EmitDynamicCast(V, DCE), E->getType()); 2125 } 2126 2127 case CK_ConstructorConversion: 2128 case CK_UserDefinedConversion: 2129 case CK_CPointerToObjCPointerCast: 2130 case CK_BlockPointerToObjCPointerCast: 2131 return EmitLValue(E->getSubExpr()); 2132 2133 case CK_UncheckedDerivedToBase: 2134 case CK_DerivedToBase: { 2135 const RecordType *DerivedClassTy = 2136 E->getSubExpr()->getType()->getAs<RecordType>(); 2137 CXXRecordDecl *DerivedClassDecl = 2138 cast<CXXRecordDecl>(DerivedClassTy->getDecl()); 2139 2140 LValue LV = EmitLValue(E->getSubExpr()); 2141 llvm::Value *This = LV.getAddress(); 2142 2143 // Perform the derived-to-base conversion 2144 llvm::Value *Base = 2145 GetAddressOfBaseClass(This, DerivedClassDecl, 2146 E->path_begin(), E->path_end(), 2147 /*NullCheckValue=*/false); 2148 2149 return MakeAddrLValue(Base, E->getType()); 2150 } 2151 case CK_ToUnion: 2152 return EmitAggExprToLValue(E); 2153 case CK_BaseToDerived: { 2154 const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>(); 2155 CXXRecordDecl *DerivedClassDecl = 2156 cast<CXXRecordDecl>(DerivedClassTy->getDecl()); 2157 2158 LValue LV = EmitLValue(E->getSubExpr()); 2159 2160 // Perform the base-to-derived conversion 2161 llvm::Value *Derived = 2162 GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl, 2163 E->path_begin(), E->path_end(), 2164 /*NullCheckValue=*/false); 2165 2166 return MakeAddrLValue(Derived, E->getType()); 2167 } 2168 case CK_LValueBitCast: { 2169 // This must be a reinterpret_cast (or c-style equivalent). 
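// (For illustration: 'reinterpret_cast<float &>(someInt)' simply bitcasts
// the address of 'someInt' from i32* to float*; no data is loaded or
// converted.)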
2170 const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E); 2171 2172 LValue LV = EmitLValue(E->getSubExpr()); 2173 llvm::Value *V = Builder.CreateBitCast(LV.getAddress(), 2174 ConvertType(CE->getTypeAsWritten())); 2175 return MakeAddrLValue(V, E->getType()); 2176 } 2177 case CK_ObjCObjectLValueCast: { 2178 LValue LV = EmitLValue(E->getSubExpr()); 2179 QualType ToType = getContext().getLValueReferenceType(E->getType()); 2180 llvm::Value *V = Builder.CreateBitCast(LV.getAddress(), 2181 ConvertType(ToType)); 2182 return MakeAddrLValue(V, E->getType()); 2183 } 2184 } 2185 2186 llvm_unreachable("Unhandled lvalue cast kind?"); 2187} 2188 2189LValue CodeGenFunction::EmitNullInitializationLValue( 2190 const CXXScalarValueInitExpr *E) { 2191 QualType Ty = E->getType(); 2192 LValue LV = MakeAddrLValue(CreateMemTemp(Ty), Ty); 2193 EmitNullInitialization(LV.getAddress(), Ty); 2194 return LV; 2195} 2196 2197LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) { 2198 assert(OpaqueValueMappingData::shouldBindAsLValue(e)); 2199 return getOpaqueLValueMapping(e); 2200} 2201 2202LValue CodeGenFunction::EmitMaterializeTemporaryExpr( 2203 const MaterializeTemporaryExpr *E) { 2204 RValue RV = EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0); 2205 return MakeAddrLValue(RV.getScalarVal(), E->getType()); 2206} 2207 2208 2209//===--------------------------------------------------------------------===// 2210// Expression Emission 2211//===--------------------------------------------------------------------===// 2212 2213RValue CodeGenFunction::EmitCallExpr(const CallExpr *E, 2214 ReturnValueSlot ReturnValue) { 2215 if (CGDebugInfo *DI = getDebugInfo()) 2216 DI->EmitLocation(Builder, E->getLocStart()); 2217 2218 // Builtins never have block type. 2219 if (E->getCallee()->getType()->isBlockPointerType()) 2220 return EmitBlockCallExpr(E, ReturnValue); 2221 2222 if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E)) 2223 return EmitCXXMemberCallExpr(CE, ReturnValue); 2224 2225 if (const CUDAKernelCallExpr *CE = dyn_cast<CUDAKernelCallExpr>(E)) 2226 return EmitCUDAKernelCallExpr(CE, ReturnValue); 2227 2228 const Decl *TargetDecl = E->getCalleeDecl(); 2229 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) { 2230 if (unsigned builtinID = FD->getBuiltinID()) 2231 return EmitBuiltinExpr(FD, builtinID, E); 2232 } 2233 2234 if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E)) 2235 if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl)) 2236 return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue); 2237 2238 if (const CXXPseudoDestructorExpr *PseudoDtor 2239 = dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) { 2240 QualType DestroyedType = PseudoDtor->getDestroyedType(); 2241 if (getContext().getLangOptions().ObjCAutoRefCount && 2242 DestroyedType->isObjCLifetimeType() && 2243 (DestroyedType.getObjCLifetime() == Qualifiers::OCL_Strong || 2244 DestroyedType.getObjCLifetime() == Qualifiers::OCL_Weak)) { 2245 // Automatic Reference Counting: 2246 // If the pseudo-expression names a retainable object with weak or 2247 // strong lifetime, the object shall be released. 2248 Expr *BaseExpr = PseudoDtor->getBase(); 2249 llvm::Value *BaseValue = NULL; 2250 Qualifiers BaseQuals; 2251 2252 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar. 
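// (For illustration: given '__strong id *p' and a typedef T for id, the
// pseudo-destructor call 'p->~T()' loads '*p' and releases it; for __weak
// storage the code below calls the objc_destroyWeak entry point on the
// address instead.)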
2253 if (PseudoDtor->isArrow()) { 2254 BaseValue = EmitScalarExpr(BaseExpr); 2255 const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>(); 2256 BaseQuals = PTy->getPointeeType().getQualifiers(); 2257 } else { 2258 LValue BaseLV = EmitLValue(BaseExpr); 2259 BaseValue = BaseLV.getAddress(); 2260 QualType BaseTy = BaseExpr->getType(); 2261 BaseQuals = BaseTy.getQualifiers(); 2262 } 2263 2264 switch (PseudoDtor->getDestroyedType().getObjCLifetime()) { 2265 case Qualifiers::OCL_None: 2266 case Qualifiers::OCL_ExplicitNone: 2267 case Qualifiers::OCL_Autoreleasing: 2268 break; 2269 2270 case Qualifiers::OCL_Strong: 2271 EmitARCRelease(Builder.CreateLoad(BaseValue, 2272 PseudoDtor->getDestroyedType().isVolatileQualified()), 2273 /*precise*/ true); 2274 break; 2275 2276 case Qualifiers::OCL_Weak: 2277 EmitARCDestroyWeak(BaseValue); 2278 break; 2279 } 2280 } else { 2281 // C++ [expr.pseudo]p1: 2282 // The result shall only be used as the operand for the function call 2283 // operator (), and the result of such a call has type void. The only 2284 // effect is the evaluation of the postfix-expression before the dot or 2285 // arrow. 2286 EmitScalarExpr(E->getCallee()); 2287 } 2288 2289 return RValue::get(0); 2290 } 2291 2292 llvm::Value *Callee = EmitScalarExpr(E->getCallee()); 2293 return EmitCall(E->getCallee()->getType(), Callee, ReturnValue, 2294 E->arg_begin(), E->arg_end(), TargetDecl); 2295} 2296 2297LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) { 2298 // Comma expressions just emit their LHS then their RHS as an l-value. 2299 if (E->getOpcode() == BO_Comma) { 2300 EmitIgnoredExpr(E->getLHS()); 2301 EnsureInsertPoint(); 2302 return EmitLValue(E->getRHS()); 2303 } 2304 2305 if (E->getOpcode() == BO_PtrMemD || 2306 E->getOpcode() == BO_PtrMemI) 2307 return EmitPointerToDataMemberBinaryExpr(E); 2308 2309 assert(E->getOpcode() == BO_Assign && "unexpected binary l-value"); 2310 2311 // Note that in all of these cases, __block variables need the RHS 2312 // evaluated first just in case the variable gets moved by the RHS. 2313 2314 if (!hasAggregateLLVMType(E->getType())) { 2315 switch (E->getLHS()->getType().getObjCLifetime()) { 2316 case Qualifiers::OCL_Strong: 2317 return EmitARCStoreStrong(E, /*ignored*/ false).first; 2318 2319 case Qualifiers::OCL_Autoreleasing: 2320 return EmitARCStoreAutoreleasing(E).first; 2321 2322 // No reason to do any of these differently. 2323 case Qualifiers::OCL_None: 2324 case Qualifiers::OCL_ExplicitNone: 2325 case Qualifiers::OCL_Weak: 2326 break; 2327 } 2328 2329 RValue RV = EmitAnyExpr(E->getRHS()); 2330 LValue LV = EmitLValue(E->getLHS()); 2331 EmitStoreThroughLValue(RV, LV); 2332 return LV; 2333 } 2334 2335 if (E->getType()->isAnyComplexType()) 2336 return EmitComplexAssignmentLValue(E); 2337 2338 return EmitAggExprToLValue(E); 2339} 2340 2341LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) { 2342 RValue RV = EmitCallExpr(E); 2343 2344 if (!RV.isScalar()) 2345 return MakeAddrLValue(RV.getAggregateAddr(), E->getType()); 2346 2347 assert(E->getCallReturnType()->isReferenceType() && 2348 "Can't have a scalar return unless the return type is a " 2349 "reference type!"); 2350 2351 return MakeAddrLValue(RV.getScalarVal(), E->getType()); 2352} 2353 2354LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) { 2355 // FIXME: This shouldn't require another copy. 
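// (EmitAggExprToLValue evaluates the va_arg into a fresh temporary and
// returns that temporary's address, hence the extra copy noted above.)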
2356 return EmitAggExprToLValue(E); 2357} 2358 2359LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) { 2360 assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor() 2361 && "binding l-value to type which needs a temporary"); 2362 AggValueSlot Slot = CreateAggTemp(E->getType()); 2363 EmitCXXConstructExpr(E, Slot); 2364 return MakeAddrLValue(Slot.getAddr(), E->getType()); 2365} 2366 2367LValue 2368CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) { 2369 return MakeAddrLValue(EmitCXXTypeidExpr(E), E->getType()); 2370} 2371 2372LValue 2373CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) { 2374 AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue"); 2375 Slot.setExternallyDestructed(); 2376 EmitAggExpr(E->getSubExpr(), Slot); 2377 EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddr()); 2378 return MakeAddrLValue(Slot.getAddr(), E->getType()); 2379} 2380 2381LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) { 2382 RValue RV = EmitObjCMessageExpr(E); 2383 2384 if (!RV.isScalar()) 2385 return MakeAddrLValue(RV.getAggregateAddr(), E->getType()); 2386 2387 assert(E->getMethodDecl()->getResultType()->isReferenceType() && 2388 "Can't have a scalar return unless the return type is a " 2389 "reference type!"); 2390 2391 return MakeAddrLValue(RV.getScalarVal(), E->getType()); 2392} 2393 2394LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) { 2395 llvm::Value *V = 2396 CGM.getObjCRuntime().GetSelector(Builder, E->getSelector(), true); 2397 return MakeAddrLValue(V, E->getType()); 2398} 2399 2400llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface, 2401 const ObjCIvarDecl *Ivar) { 2402 return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar); 2403} 2404 2405LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy, 2406 llvm::Value *BaseValue, 2407 const ObjCIvarDecl *Ivar, 2408 unsigned CVRQualifiers) { 2409 return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue, 2410 Ivar, CVRQualifiers); 2411} 2412 2413LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) { 2414 // FIXME: A lot of the code below could be shared with EmitMemberExpr. 2415 llvm::Value *BaseValue = 0; 2416 const Expr *BaseExpr = E->getBase(); 2417 Qualifiers BaseQuals; 2418 QualType ObjectTy; 2419 if (E->isArrow()) { 2420 BaseValue = EmitScalarExpr(BaseExpr); 2421 ObjectTy = BaseExpr->getType()->getPointeeType(); 2422 BaseQuals = ObjectTy.getQualifiers(); 2423 } else { 2424 LValue BaseLV = EmitLValue(BaseExpr); 2425 // FIXME: this isn't right for bitfields. 2426 BaseValue = BaseLV.getAddress(); 2427 ObjectTy = BaseExpr->getType(); 2428 BaseQuals = ObjectTy.getQualifiers(); 2429 } 2430 2431 LValue LV = 2432 EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(), 2433 BaseQuals.getCVRQualifiers()); 2434 setObjCGCLValueClass(getContext(), E, LV); 2435 return LV; 2436} 2437 2438LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) { 2439 // Can only get l-value for message expression returning aggregate type 2440 RValue RV = EmitAnyExprToTemp(E); 2441 return MakeAddrLValue(RV.getAggregateAddr(), E->getType()); 2442} 2443 2444RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee, 2445 ReturnValueSlot ReturnValue, 2446 CallExpr::const_arg_iterator ArgBeg, 2447 CallExpr::const_arg_iterator ArgEnd, 2448 const Decl *TargetDecl) { 2449 // Get the actual function type. 
The callee type will always be a pointer to 2450 // function type or a block pointer type. 2451 assert(CalleeType->isFunctionPointerType() && 2452 "Call must have function pointer type!"); 2453 2454 CalleeType = getContext().getCanonicalType(CalleeType); 2455 2456 const FunctionType *FnType 2457 = cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType()); 2458 2459 CallArgList Args; 2460 EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd); 2461 2462 const CGFunctionInfo &FnInfo = CGM.getTypes().getFunctionInfo(Args, FnType); 2463 2464 // C99 6.5.2.2p6: 2465 // If the expression that denotes the called function has a type 2466 // that does not include a prototype, [the default argument 2467 // promotions are performed]. If the number of arguments does not 2468 // equal the number of parameters, the behavior is undefined. If 2469 // the function is defined with a type that includes a prototype, 2470 // and either the prototype ends with an ellipsis (, ...) or the 2471 // types of the arguments after promotion are not compatible with 2472 // the types of the parameters, the behavior is undefined. If the 2473 // function is defined with a type that does not include a 2474 // prototype, and the types of the arguments after promotion are 2475 // not compatible with those of the parameters after promotion, 2476 // the behavior is undefined [except in some trivial cases]. 2477 // That is, in the general case, we should assume that a call 2478 // through an unprototyped function type works like a *non-variadic* 2479 // call. The way we make this work is to cast to the exact type 2480 // of the promoted arguments. 2481 if (isa<FunctionNoProtoType>(FnType) && 2482 !getTargetHooks().isNoProtoCallVariadic(FnInfo)) { 2483 assert(cast<llvm::FunctionType>(Callee->getType()->getContainedType(0)) 2484 ->isVarArg()); 2485 llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo, false); 2486 CalleeTy = CalleeTy->getPointerTo(); 2487 Callee = Builder.CreateBitCast(Callee, CalleeTy, "callee.knr.cast"); 2488 } 2489 2490 return EmitCall(FnInfo, Callee, ReturnValue, Args, TargetDecl); 2491} 2492 2493LValue CodeGenFunction:: 2494EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) { 2495 llvm::Value *BaseV; 2496 if (E->getOpcode() == BO_PtrMemI) 2497 BaseV = EmitScalarExpr(E->getLHS()); 2498 else 2499 BaseV = EmitLValue(E->getLHS()).getAddress(); 2500 2501 llvm::Value *OffsetV = EmitScalarExpr(E->getRHS()); 2502 2503 const MemberPointerType *MPT 2504 = E->getRHS()->getType()->getAs<MemberPointerType>(); 2505 2506 llvm::Value *AddV = 2507 CGM.getCXXABI().EmitMemberDataPointerAddress(*this, BaseV, OffsetV, MPT); 2508 2509 return MakeAddrLValue(AddV, MPT->getPointeeType()); 2510} 2511 2512static void 2513EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest, 2514 llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2, 2515 uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) { 2516 if (E->isCmpXChg()) { 2517 // Note that cmpxchg only supports specifying one ordering and 2518 // doesn't support weak cmpxchg, at least at the moment. 
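// Roughly, for a hypothetical '__c11_atomic_compare_exchange_strong(p,
// &expected, desired, order, order)' the sequence built below is:
//   %exp = load %expected ; %des = load %desired
//   %old = cmpxchg %p, %exp, %des <order>
//   store %old, %expected
//   %eq = icmp eq %old, %exp
// and '%eq' is stored to the result slot as the boolean outcome.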
2519 llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1); 2520 LoadVal1->setAlignment(Align); 2521 llvm::LoadInst *LoadVal2 = CGF.Builder.CreateLoad(Val2); 2522 LoadVal2->setAlignment(Align); 2523 llvm::AtomicCmpXchgInst *CXI = 2524 CGF.Builder.CreateAtomicCmpXchg(Ptr, LoadVal1, LoadVal2, Order); 2525 CXI->setVolatile(E->isVolatile()); 2526 llvm::StoreInst *StoreVal1 = CGF.Builder.CreateStore(CXI, Val1); 2527 StoreVal1->setAlignment(Align); 2528 llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(CXI, LoadVal1); 2529 CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType())); 2530 return; 2531 } 2532 2533 if (E->getOp() == AtomicExpr::Load) { 2534 llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr); 2535 Load->setAtomic(Order); 2536 Load->setAlignment(Size); 2537 Load->setVolatile(E->isVolatile()); 2538 llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest); 2539 StoreDest->setAlignment(Align); 2540 return; 2541 } 2542 2543 if (E->getOp() == AtomicExpr::Store) { 2544 assert(!Dest && "Store does not return a value"); 2545 llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1); 2546 LoadVal1->setAlignment(Align); 2547 llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr); 2548 Store->setAtomic(Order); 2549 Store->setAlignment(Size); 2550 Store->setVolatile(E->isVolatile()); 2551 return; 2552 } 2553 2554 llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add; 2555 switch (E->getOp()) { 2556 case AtomicExpr::CmpXchgWeak: 2557 case AtomicExpr::CmpXchgStrong: 2558 case AtomicExpr::Store: 2559 case AtomicExpr::Init: 2560 case AtomicExpr::Load: assert(0 && "Already handled!"); 2561 case AtomicExpr::Add: Op = llvm::AtomicRMWInst::Add; break; 2562 case AtomicExpr::Sub: Op = llvm::AtomicRMWInst::Sub; break; 2563 case AtomicExpr::And: Op = llvm::AtomicRMWInst::And; break; 2564 case AtomicExpr::Or: Op = llvm::AtomicRMWInst::Or; break; 2565 case AtomicExpr::Xor: Op = llvm::AtomicRMWInst::Xor; break; 2566 case AtomicExpr::Xchg: Op = llvm::AtomicRMWInst::Xchg; break; 2567 } 2568 llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1); 2569 LoadVal1->setAlignment(Align); 2570 llvm::AtomicRMWInst *RMWI = 2571 CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order); 2572 RMWI->setVolatile(E->isVolatile()); 2573 llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(RMWI, Dest); 2574 StoreDest->setAlignment(Align); 2575} 2576 2577// This function emits any expression (scalar, complex, or aggregate) 2578// into a temporary alloca. 
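// (For illustration: for an atomic store whose value operand is a struct,
// the operand is spilled into an '.atomictmp' alloca so that EmitAtomicExpr
// can work uniformly with addresses.)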
2579static llvm::Value * 2580EmitValToTemp(CodeGenFunction &CGF, Expr *E) { 2581 llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp"); 2582 CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(), 2583 /*Init*/ true); 2584 return DeclPtr; 2585} 2586 2587static RValue ConvertTempToRValue(CodeGenFunction &CGF, QualType Ty, 2588 llvm::Value *Dest) { 2589 if (Ty->isAnyComplexType()) 2590 return RValue::getComplex(CGF.LoadComplexFromAddr(Dest, false)); 2591 if (CGF.hasAggregateLLVMType(Ty)) 2592 return RValue::getAggregate(Dest); 2593 return RValue::get(CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(Dest, Ty))); 2594} 2595 2596RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) { 2597 QualType AtomicTy = E->getPtr()->getType()->getPointeeType(); 2598 QualType MemTy = AtomicTy->getAs<AtomicType>()->getValueType(); 2599 CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy); 2600 uint64_t Size = sizeChars.getQuantity(); 2601 CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy); 2602 unsigned Align = alignChars.getQuantity(); 2603 unsigned MaxInlineWidth = 2604 getContext().getTargetInfo().getMaxAtomicInlineWidth(); 2605 bool UseLibcall = (Size != Align || Size > MaxInlineWidth); 2606 2607 2608 2609 llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0; 2610 Ptr = EmitScalarExpr(E->getPtr()); 2611 2612 if (E->getOp() == AtomicExpr::Init) { 2613 assert(!Dest && "Init does not return a value"); 2614 Val1 = EmitScalarExpr(E->getVal1()); 2615 llvm::StoreInst *Store = Builder.CreateStore(Val1, Ptr); 2616 Store->setAlignment(Size); 2617 Store->setVolatile(E->isVolatile()); 2618 return RValue::get(0); 2619 } 2620 2621 Order = EmitScalarExpr(E->getOrder()); 2622 if (E->isCmpXChg()) { 2623 Val1 = EmitScalarExpr(E->getVal1()); 2624 Val2 = EmitValToTemp(*this, E->getVal2()); 2625 OrderFail = EmitScalarExpr(E->getOrderFail()); 2626 (void)OrderFail; // OrderFail is unused at the moment 2627 } else if ((E->getOp() == AtomicExpr::Add || E->getOp() == AtomicExpr::Sub) && 2628 MemTy->isPointerType()) { 2629 // For pointers, we're required to do a bit of math: adding 1 to an int* 2630 // is not the same as adding 1 to a uintptr_t. 2631 QualType Val1Ty = E->getVal1()->getType(); 2632 llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1()); 2633 CharUnits PointeeIncAmt = 2634 getContext().getTypeSizeInChars(MemTy->getPointeeType()); 2635 Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt)); 2636 Val1 = CreateMemTemp(Val1Ty, ".atomictmp"); 2637 EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty)); 2638 } else if (E->getOp() != AtomicExpr::Load) { 2639 Val1 = EmitValToTemp(*this, E->getVal1()); 2640 } 2641 2642 if (E->getOp() != AtomicExpr::Store && !Dest) 2643 Dest = CreateMemTemp(E->getType(), ".atomicdst"); 2644 2645 if (UseLibcall) { 2646 // FIXME: Finalize what the libcalls are actually supposed to look like. 2647 // See also http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary . 
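// (The inline lowering below is only used when the size equals the
// alignment and fits within getMaxAtomicInlineWidth(); e.g. a 16-byte
// _Atomic struct on a target with an 8-byte inline width takes this
// unsupported libcall path instead.)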
2648 return EmitUnsupportedRValue(E, "atomic library call"); 2649 } 2650#if 0 2651 if (UseLibcall) { 2652 const char* LibCallName; 2653 switch (E->getOp()) { 2654 case AtomicExpr::CmpXchgWeak: 2655 LibCallName = "__atomic_compare_exchange_generic"; break; 2656 case AtomicExpr::CmpXchgStrong: 2657 LibCallName = "__atomic_compare_exchange_generic"; break; 2658 case AtomicExpr::Add: LibCallName = "__atomic_fetch_add_generic"; break; 2659 case AtomicExpr::Sub: LibCallName = "__atomic_fetch_sub_generic"; break; 2660 case AtomicExpr::And: LibCallName = "__atomic_fetch_and_generic"; break; 2661 case AtomicExpr::Or: LibCallName = "__atomic_fetch_or_generic"; break; 2662 case AtomicExpr::Xor: LibCallName = "__atomic_fetch_xor_generic"; break; 2663 case AtomicExpr::Xchg: LibCallName = "__atomic_exchange_generic"; break; 2664 case AtomicExpr::Store: LibCallName = "__atomic_store_generic"; break; 2665 case AtomicExpr::Load: LibCallName = "__atomic_load_generic"; break; 2666 } 2667 llvm::SmallVector<QualType, 4> Params; 2668 CallArgList Args; 2669 QualType RetTy = getContext().VoidTy; 2670 if (E->getOp() != AtomicExpr::Store && !E->isCmpXChg()) 2671 Args.add(RValue::get(EmitCastToVoidPtr(Dest)), 2672 getContext().VoidPtrTy); 2673 Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), 2674 getContext().VoidPtrTy); 2675 if (E->getOp() != AtomicExpr::Load) 2676 Args.add(RValue::get(EmitCastToVoidPtr(Val1)), 2677 getContext().VoidPtrTy); 2678 if (E->isCmpXChg()) { 2679 Args.add(RValue::get(EmitCastToVoidPtr(Val2)), 2680 getContext().VoidPtrTy); 2681 RetTy = getContext().IntTy; 2682 } 2683 Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)), 2684 getContext().getSizeType()); 2685 const CGFunctionInfo &FuncInfo = 2686 CGM.getTypes().getFunctionInfo(RetTy, Args, FunctionType::ExtInfo()); 2687 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo, false); 2688 llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName); 2689 RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args); 2690 if (E->isCmpXChg()) 2691 return Res; 2692 if (E->getOp() == AtomicExpr::Store) 2693 return RValue::get(0); 2694 return ConvertTempToRValue(*this, E->getType(), Dest); 2695 } 2696#endif 2697 llvm::Type *IPtrTy = 2698 llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo(); 2699 llvm::Value *OrigDest = Dest; 2700 Ptr = Builder.CreateBitCast(Ptr, IPtrTy); 2701 if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy); 2702 if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy); 2703 if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy); 2704 2705 if (isa<llvm::ConstantInt>(Order)) { 2706 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); 2707 switch (ord) { 2708 case 0: // memory_order_relaxed 2709 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2710 llvm::Monotonic); 2711 break; 2712 case 1: // memory_order_consume 2713 case 2: // memory_order_acquire 2714 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2715 llvm::Acquire); 2716 break; 2717 case 3: // memory_order_release 2718 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2719 llvm::Release); 2720 break; 2721 case 4: // memory_order_acq_rel 2722 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2723 llvm::AcquireRelease); 2724 break; 2725 case 5: // memory_order_seq_cst 2726 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2727 llvm::SequentiallyConsistent); 2728 break; 2729 default: // invalid order 2730 // We should not ever get here normally, but it's hard to 2731 // 
enforce that in general. 2732 break; 2733 } 2734 if (E->getOp() == AtomicExpr::Store || E->getOp() == AtomicExpr::Init) 2735 return RValue::get(0); 2736 return ConvertTempToRValue(*this, E->getType(), OrigDest); 2737 } 2738 2739 // Long case, when Order isn't obviously constant. 2740 2741 // Create all the relevant BB's 2742 llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0, 2743 *AcqRelBB = 0, *SeqCstBB = 0; 2744 MonotonicBB = createBasicBlock("monotonic", CurFn); 2745 if (E->getOp() != AtomicExpr::Store) 2746 AcquireBB = createBasicBlock("acquire", CurFn); 2747 if (E->getOp() != AtomicExpr::Load) 2748 ReleaseBB = createBasicBlock("release", CurFn); 2749 if (E->getOp() != AtomicExpr::Load && E->getOp() != AtomicExpr::Store) 2750 AcqRelBB = createBasicBlock("acqrel", CurFn); 2751 SeqCstBB = createBasicBlock("seqcst", CurFn); 2752 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn); 2753 2754 // Create the switch for the split 2755 // MonotonicBB is arbitrarily chosen as the default case; in practice, this 2756 // doesn't matter unless someone is crazy enough to use something that 2757 // doesn't fold to a constant for the ordering. 2758 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false); 2759 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB); 2760 2761 // Emit all the different atomics 2762 Builder.SetInsertPoint(MonotonicBB); 2763 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2764 llvm::Monotonic); 2765 Builder.CreateBr(ContBB); 2766 if (E->getOp() != AtomicExpr::Store) { 2767 Builder.SetInsertPoint(AcquireBB); 2768 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2769 llvm::Acquire); 2770 Builder.CreateBr(ContBB); 2771 SI->addCase(Builder.getInt32(1), AcquireBB); 2772 SI->addCase(Builder.getInt32(2), AcquireBB); 2773 } 2774 if (E->getOp() != AtomicExpr::Load) { 2775 Builder.SetInsertPoint(ReleaseBB); 2776 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2777 llvm::Release); 2778 Builder.CreateBr(ContBB); 2779 SI->addCase(Builder.getInt32(3), ReleaseBB); 2780 } 2781 if (E->getOp() != AtomicExpr::Load && E->getOp() != AtomicExpr::Store) { 2782 Builder.SetInsertPoint(AcqRelBB); 2783 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2784 llvm::AcquireRelease); 2785 Builder.CreateBr(ContBB); 2786 SI->addCase(Builder.getInt32(4), AcqRelBB); 2787 } 2788 Builder.SetInsertPoint(SeqCstBB); 2789 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2790 llvm::SequentiallyConsistent); 2791 Builder.CreateBr(ContBB); 2792 SI->addCase(Builder.getInt32(5), SeqCstBB); 2793 2794 // Cleanup and return 2795 Builder.SetInsertPoint(ContBB); 2796 if (E->getOp() == AtomicExpr::Store) 2797 return RValue::get(0); 2798 return ConvertTempToRValue(*this, E->getType(), OrigDest); 2799} 2800 2801void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, unsigned AccuracyN, 2802 unsigned AccuracyD) { 2803 assert(Val->getType()->isFPOrFPVectorTy()); 2804 if (!AccuracyN || !isa<llvm::Instruction>(Val)) 2805 return; 2806 2807 llvm::Value *Vals[2]; 2808 Vals[0] = llvm::ConstantInt::get(Int32Ty, AccuracyN); 2809 Vals[1] = llvm::ConstantInt::get(Int32Ty, AccuracyD); 2810 llvm::MDNode *Node = llvm::MDNode::get(getLLVMContext(), Vals); 2811 2812 cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpaccuracy, 2813 Node); 2814} 2815 2816namespace { 2817 struct LValueOrRValue { 2818 LValue LV; 2819 RValue RV; 2820 }; 2821} 2822 2823static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF, 2824 const 
PseudoObjectExpr *E, 2825 bool forLValue, 2826 AggValueSlot slot) { 2827 llvm::SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques; 2828 2829 // Find the result expression, if any. 2830 const Expr *resultExpr = E->getResultExpr(); 2831 LValueOrRValue result; 2832 2833 for (PseudoObjectExpr::const_semantics_iterator 2834 i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) { 2835 const Expr *semantic = *i; 2836 2837 // If this semantic expression is an opaque value, bind it 2838 // to the result of its source expression. 2839 if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) { 2840 2841 // If this is the result expression, we may need to evaluate 2842 // directly into the slot. 2843 typedef CodeGenFunction::OpaqueValueMappingData OVMA; 2844 OVMA opaqueData; 2845 if (ov == resultExpr && ov->isRValue() && !forLValue && 2846 CodeGenFunction::hasAggregateLLVMType(ov->getType()) && 2847 !ov->getType()->isAnyComplexType()) { 2848 CGF.EmitAggExpr(ov->getSourceExpr(), slot); 2849 2850 LValue LV = CGF.MakeAddrLValue(slot.getAddr(), ov->getType()); 2851 opaqueData = OVMA::bind(CGF, ov, LV); 2852 result.RV = slot.asRValue(); 2853 2854 // Otherwise, emit as normal. 2855 } else { 2856 opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr()); 2857 2858 // If this is the result, also evaluate the result now. 2859 if (ov == resultExpr) { 2860 if (forLValue) 2861 result.LV = CGF.EmitLValue(ov); 2862 else 2863 result.RV = CGF.EmitAnyExpr(ov, slot); 2864 } 2865 } 2866 2867 opaques.push_back(opaqueData); 2868 2869 // Otherwise, if the expression is the result, evaluate it 2870 // and remember the result. 2871 } else if (semantic == resultExpr) { 2872 if (forLValue) 2873 result.LV = CGF.EmitLValue(semantic); 2874 else 2875 result.RV = CGF.EmitAnyExpr(semantic, slot); 2876 2877 // Otherwise, evaluate the expression in an ignored context. 2878 } else { 2879 CGF.EmitIgnoredExpr(semantic); 2880 } 2881 } 2882 2883 // Unbind all the opaques now. 2884 for (unsigned i = 0, e = opaques.size(); i != e; ++i) 2885 opaques[i].unbind(CGF); 2886 2887 return result; 2888} 2889 2890RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E, 2891 AggValueSlot slot) { 2892 return emitPseudoObjectExpr(*this, E, false, slot).RV; 2893} 2894 2895LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) { 2896 return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV; 2897} 2898
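// (For illustration: for an Objective-C property assignment 'obj.prop = x',
// the pseudo-object's semantic form binds the base and the RHS to
// OpaqueValueExprs and lists the setter message send among the semantic
// expressions; the loop above emits each semantic expression in order and
// unbinds the opaques afterwards.)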