CGExpr.cpp revision c637d738897b1745af3bad7fc551f26b98da838c
//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGCall.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGRecordLayout.h"
#include "CGObjCRuntime.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
  unsigned addressSpace =
    cast<llvm::PointerType>(value->getType())->getAddressSpace();

  llvm::PointerType *destType = Int8PtrTy;
  if (addressSpace)
    destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);

  if (value->getType() == destType) return value;
  return Builder.CreateBitCast(value, destType);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
                                                    const Twine &Name) {
  if (!Builder.isNamePreserving())
    return new llvm::AllocaInst(Ty, 0, "", AllocaInsertPt);
  return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
}

void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var,
                                     llvm::Value *Init) {
  llvm::StoreInst *Store = new llvm::StoreInst(Init, Var);
  llvm::BasicBlock *Block = AllocaInsertPt->getParent();
  Block->getInstList().insertAfter(&*AllocaInsertPt, Store);
}

llvm::AllocaInst *CodeGenFunction::CreateIRTemp(QualType Ty,
                                                const Twine &Name) {
  llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
  // FIXME: Should we prefer the preferred type alignment here?
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  Alloc->setAlignment(Align.getQuantity());
  return Alloc;
}

llvm::AllocaInst *CodeGenFunction::CreateMemTemp(QualType Ty,
                                                 const Twine &Name) {
  llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
  // FIXME: Should we prefer the preferred type alignment here?
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  Alloc->setAlignment(Align.getQuantity());
  return Alloc;
}
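
// Illustrative sketch (editor's note, not part of this revision): the
// difference between CreateIRTemp and CreateMemTemp matters for types whose
// register and memory representations differ, most notably bool.  Assuming
// the usual conversions on common targets:
//
//   llvm::AllocaInst *A = CGF.CreateIRTemp(Ctx.BoolTy);   // roughly: alloca i1
//   llvm::AllocaInst *B = CGF.CreateMemTemp(Ctx.BoolTy);  // roughly: alloca i8
//
// CreateIRTemp uses ConvertType (the register type), while CreateMemTemp uses
// ConvertTypeForMem (the in-memory type).  CGF and Ctx are hypothetical names
// for a CodeGenFunction and its ASTContext.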

/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
    llvm::Value *MemPtr = EmitScalarExpr(E);
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
  }

  QualType BoolTy = getContext().BoolTy;
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy);
}

/// EmitIgnoredExpr - Emit code to compute the specified expression,
/// ignoring the result.
void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
  if (E->isRValue())
    return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);

  // Just emit it as an l-value and drop the result.
  EmitLValue(E);
}

/// EmitAnyExpr - Emit code to compute the specified expression which
/// can have any type.  The result is returned as an RValue struct.
/// If this is an aggregate expression, AggSlot indicates where the
/// result should be returned.
RValue CodeGenFunction::EmitAnyExpr(const Expr *E, AggValueSlot AggSlot,
                                    bool IgnoreResult) {
  if (!hasAggregateLLVMType(E->getType()))
    return RValue::get(EmitScalarExpr(E, IgnoreResult));
  else if (E->getType()->isAnyComplexType())
    return RValue::getComplex(EmitComplexExpr(E, IgnoreResult, IgnoreResult));

  EmitAggExpr(E, AggSlot, IgnoreResult);
  return AggSlot.asRValue();
}

/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
  AggValueSlot AggSlot = AggValueSlot::ignored();

  if (hasAggregateLLVMType(E->getType()) &&
      !E->getType()->isAnyComplexType())
    AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
  return EmitAnyExpr(E, AggSlot);
}

/// EmitAnyExprToMem - Evaluate an expression into a given memory
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
                                       llvm::Value *Location,
                                       Qualifiers Quals,
                                       bool IsInit) {
  if (E->getType()->isAnyComplexType())
    EmitComplexExprIntoAddr(E, Location, Quals.hasVolatile());
  else if (hasAggregateLLVMType(E->getType()))
    EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
                                         AggValueSlot::IsDestructed_t(IsInit),
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsAliased_t(!IsInit)));
  else {
    RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
    LValue LV = MakeAddrLValue(Location, E->getType());
    EmitStoreThroughLValue(RV, LV);
  }
}
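
// Illustrative example (editor's note): the machinery below handles reference
// bindings that pick out a subobject of a temporary, e.g. (assuming plain
// C++ source):
//
//   struct Base { int x; };
//   struct Derived : Base { };
//   const Base &b = Derived();   // derived-to-base adjustment
//   const int &i = Derived().x;  // field adjustment
//
// The temporary is emitted first, then the recorded adjustments are replayed
// to compute the address actually bound to the reference.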

namespace {
/// \brief An adjustment to be made to the temporary created when emitting a
/// reference binding, which accesses a particular subobject of that temporary.
  struct SubobjectAdjustment {
    enum { DerivedToBaseAdjustment, FieldAdjustment } Kind;

    union {
      struct {
        const CastExpr *BasePath;
        const CXXRecordDecl *DerivedClass;
      } DerivedToBase;

      FieldDecl *Field;
    };

    SubobjectAdjustment(const CastExpr *BasePath,
                        const CXXRecordDecl *DerivedClass)
      : Kind(DerivedToBaseAdjustment) {
      DerivedToBase.BasePath = BasePath;
      DerivedToBase.DerivedClass = DerivedClass;
    }

    SubobjectAdjustment(FieldDecl *Field)
      : Kind(FieldAdjustment) {
      this->Field = Field;
    }
  };
}

static llvm::Value *
CreateReferenceTemporary(CodeGenFunction &CGF, QualType Type,
                         const NamedDecl *InitializedDecl) {
  if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
    if (VD->hasGlobalStorage()) {
      llvm::SmallString<256> Name;
      llvm::raw_svector_ostream Out(Name);
      CGF.CGM.getCXXABI().getMangleContext().mangleReferenceTemporary(VD, Out);
      Out.flush();

      llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type);

      // Create the reference temporary.
      llvm::GlobalValue *RefTemp =
        new llvm::GlobalVariable(CGF.CGM.getModule(),
                                 RefTempTy, /*isConstant=*/false,
                                 llvm::GlobalValue::InternalLinkage,
                                 llvm::Constant::getNullValue(RefTempTy),
                                 Name.str());
      return RefTemp;
    }
  }

  return CGF.CreateMemTemp(Type, "ref.tmp");
}
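
// Illustrative example (editor's note): for a reference with static storage
// duration, e.g.
//
//   const int &r = 42;   // at namespace scope
//
// CreateReferenceTemporary produces an internal-linkage global whose name
// comes from mangleReferenceTemporary, rather than a function-local alloca;
// the exact mangled name is ABI-dependent.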

static llvm::Value *
EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
                            llvm::Value *&ReferenceTemporary,
                            const CXXDestructorDecl *&ReferenceTemporaryDtor,
                            QualType &ObjCARCReferenceLifetimeType,
                            const NamedDecl *InitializedDecl) {
  // Look through expressions for materialized temporaries (for now).
  if (const MaterializeTemporaryExpr *M
                                      = dyn_cast<MaterializeTemporaryExpr>(E)) {
    // Objective-C++ ARC:
    //   If we are binding a reference to a temporary that has ownership, we
    //   need to perform retain/release operations on the temporary.
    if (CGF.getContext().getLangOptions().ObjCAutoRefCount &&
        E->getType()->isObjCLifetimeType() &&
        (E->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
         E->getType().getObjCLifetime() == Qualifiers::OCL_Weak ||
         E->getType().getObjCLifetime() == Qualifiers::OCL_Autoreleasing))
      ObjCARCReferenceLifetimeType = E->getType();

    E = M->GetTemporaryExpr();
  }

  if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
    E = DAE->getExpr();

  if (const ExprWithCleanups *TE = dyn_cast<ExprWithCleanups>(E)) {
    CodeGenFunction::RunCleanupsScope Scope(CGF);

    return EmitExprForReferenceBinding(CGF, TE->getSubExpr(),
                                       ReferenceTemporary,
                                       ReferenceTemporaryDtor,
                                       ObjCARCReferenceLifetimeType,
                                       InitializedDecl);
  }

  if (const ObjCPropertyRefExpr *PRE =
      dyn_cast<ObjCPropertyRefExpr>(E->IgnoreParenImpCasts()))
    if (PRE->getGetterResultType()->isReferenceType())
      E = PRE;

  RValue RV;
  if (E->isGLValue()) {
    // Emit the expression as an lvalue.
    LValue LV = CGF.EmitLValue(E);
    if (LV.isPropertyRef()) {
      RV = CGF.EmitLoadOfPropertyRefLValue(LV);
      return RV.getScalarVal();
    }

    if (LV.isSimple())
      return LV.getAddress();

    // We have to load the lvalue.
    RV = CGF.EmitLoadOfLValue(LV);
  } else {
    if (!ObjCARCReferenceLifetimeType.isNull()) {
      ReferenceTemporary = CreateReferenceTemporary(CGF,
                                                  ObjCARCReferenceLifetimeType,
                                                    InitializedDecl);


      LValue RefTempDst = CGF.MakeAddrLValue(ReferenceTemporary,
                                             ObjCARCReferenceLifetimeType);

      CGF.EmitScalarInit(E, dyn_cast_or_null<ValueDecl>(InitializedDecl),
                         RefTempDst, false);

      bool ExtendsLifeOfTemporary = false;
      if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
        if (Var->extendsLifetimeOfTemporary())
          ExtendsLifeOfTemporary = true;
      } else if (InitializedDecl && isa<FieldDecl>(InitializedDecl)) {
        ExtendsLifeOfTemporary = true;
      }

      if (!ExtendsLifeOfTemporary) {
        // Since the lifetime of this temporary isn't going to be extended,
        // we need to clean it up ourselves at the end of the full expression.
        switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
        case Qualifiers::OCL_None:
        case Qualifiers::OCL_ExplicitNone:
        case Qualifiers::OCL_Autoreleasing:
          break;

        case Qualifiers::OCL_Strong: {
          assert(!ObjCARCReferenceLifetimeType->isArrayType());
          CleanupKind cleanupKind = CGF.getARCCleanupKind();
          CGF.pushDestroy(cleanupKind,
                          ReferenceTemporary,
                          ObjCARCReferenceLifetimeType,
                          CodeGenFunction::destroyARCStrongImprecise,
                          cleanupKind & EHCleanup);
          break;
        }

        case Qualifiers::OCL_Weak:
          assert(!ObjCARCReferenceLifetimeType->isArrayType());
          CGF.pushDestroy(NormalAndEHCleanup,
                          ReferenceTemporary,
                          ObjCARCReferenceLifetimeType,
                          CodeGenFunction::destroyARCWeak,
                          /*useEHCleanupForArray*/ true);
          break;
        }

        ObjCARCReferenceLifetimeType = QualType();
      }

      return ReferenceTemporary;
    }

    SmallVector<SubobjectAdjustment, 2> Adjustments;
    while (true) {
      E = E->IgnoreParens();

      if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
        if ((CE->getCastKind() == CK_DerivedToBase ||
             CE->getCastKind() == CK_UncheckedDerivedToBase) &&
            E->getType()->isRecordType()) {
          E = CE->getSubExpr();
          CXXRecordDecl *Derived
            = cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl());
          Adjustments.push_back(SubobjectAdjustment(CE, Derived));
          continue;
        }

        if (CE->getCastKind() == CK_NoOp) {
          E = CE->getSubExpr();
          continue;
        }
      } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
        if (!ME->isArrow() && ME->getBase()->isRValue()) {
          assert(ME->getBase()->getType()->isRecordType());
          if (FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
            E = ME->getBase();
            Adjustments.push_back(SubobjectAdjustment(Field));
            continue;
          }
        }
      }

      if (const OpaqueValueExpr *opaque = dyn_cast<OpaqueValueExpr>(E))
        if (opaque->getType()->isRecordType())
          return CGF.EmitOpaqueValueLValue(opaque).getAddress();

      // Nothing changed.
      break;
    }

    // Create a reference temporary if necessary.
    AggValueSlot AggSlot = AggValueSlot::ignored();
    if (CGF.hasAggregateLLVMType(E->getType()) &&
        !E->getType()->isAnyComplexType()) {
      ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
                                                    InitializedDecl);
      AggValueSlot::IsDestructed_t isDestructed
        = AggValueSlot::IsDestructed_t(InitializedDecl != 0);
      AggSlot = AggValueSlot::forAddr(ReferenceTemporary, Qualifiers(),
                                      isDestructed,
                                      AggValueSlot::DoesNotNeedGCBarriers,
                                      AggValueSlot::IsNotAliased);
    }

    if (InitializedDecl) {
      // Get the destructor for the reference temporary.
      if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
        CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
        if (!ClassDecl->hasTrivialDestructor())
          ReferenceTemporaryDtor = ClassDecl->getDestructor();
      }
    }

    RV = CGF.EmitAnyExpr(E, AggSlot);

    // Check if we need to perform derived-to-base casts and/or field accesses,
    // to get from the temporary object we created (and, potentially, for which
    // we extended the lifetime) to the subobject we're binding the reference
    // to.
    if (!Adjustments.empty()) {
      llvm::Value *Object = RV.getAggregateAddr();
      for (unsigned I = Adjustments.size(); I != 0; --I) {
        SubobjectAdjustment &Adjustment = Adjustments[I-1];
        switch (Adjustment.Kind) {
        case SubobjectAdjustment::DerivedToBaseAdjustment:
          Object =
              CGF.GetAddressOfBaseClass(Object,
                                        Adjustment.DerivedToBase.DerivedClass,
                              Adjustment.DerivedToBase.BasePath->path_begin(),
                              Adjustment.DerivedToBase.BasePath->path_end(),
                                        /*NullCheckValue=*/false);
          break;

        case SubobjectAdjustment::FieldAdjustment: {
          LValue LV =
            CGF.EmitLValueForField(Object, Adjustment.Field, 0);
          if (LV.isSimple()) {
            Object = LV.getAddress();
            break;
          }

          // For non-simple lvalues, we actually have to create a copy of
          // the object we're binding to.
          QualType T = Adjustment.Field->getType().getNonReferenceType()
                                                  .getUnqualifiedType();
          Object = CreateReferenceTemporary(CGF, T, InitializedDecl);
          LValue TempLV = CGF.MakeAddrLValue(Object,
                                             Adjustment.Field->getType());
          CGF.EmitStoreThroughLValue(CGF.EmitLoadOfLValue(LV), TempLV);
          break;
        }

        }
      }

      return Object;
    }
  }

  if (RV.isAggregate())
    return RV.getAggregateAddr();

  // Create a temporary variable that we can bind the reference to.
  ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
                                                InitializedDecl);


  unsigned Alignment =
    CGF.getContext().getTypeAlignInChars(E->getType()).getQuantity();
  if (RV.isScalar())
    CGF.EmitStoreOfScalar(RV.getScalarVal(), ReferenceTemporary,
                          /*Volatile=*/false, Alignment, E->getType());
  else
    CGF.StoreComplexToAddr(RV.getComplexVal(), ReferenceTemporary,
                           /*Volatile=*/false);
  return ReferenceTemporary;
}
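
// Illustrative example (editor's note): the classic case the code above
// implements is C++ lifetime extension, e.g.
//
//   const std::string &s = makeString();
//
// The std::string temporary is materialized into a "ref.tmp" slot, 's' binds
// to that slot, and (in EmitReferenceBindingToExpr below) a destructor
// cleanup is pushed so the temporary lives as long as the reference.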

RValue
CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
                                            const NamedDecl *InitializedDecl) {
  llvm::Value *ReferenceTemporary = 0;
  const CXXDestructorDecl *ReferenceTemporaryDtor = 0;
  QualType ObjCARCReferenceLifetimeType;
  llvm::Value *Value = EmitExprForReferenceBinding(*this, E, ReferenceTemporary,
                                                   ReferenceTemporaryDtor,
                                                   ObjCARCReferenceLifetimeType,
                                                   InitializedDecl);
  if (!ReferenceTemporaryDtor && ObjCARCReferenceLifetimeType.isNull())
    return RValue::get(Value);

  // Make sure to call the destructor for the reference temporary.
  const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl);
  if (VD && VD->hasGlobalStorage()) {
    if (ReferenceTemporaryDtor) {
      llvm::Constant *DtorFn =
        CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
      EmitCXXGlobalDtorRegistration(DtorFn,
                                    cast<llvm::Constant>(ReferenceTemporary));
    } else {
      assert(!ObjCARCReferenceLifetimeType.isNull());
      // Note: We intentionally do not register a global "destructor" to
      // release the object.
    }

    return RValue::get(Value);
  }

  if (ReferenceTemporaryDtor)
    PushDestructorCleanup(ReferenceTemporaryDtor, ReferenceTemporary);
  else {
    switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
      llvm_unreachable(
                      "Not a reference temporary that needs to be deallocated");
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      // Nothing to do.
      break;

    case Qualifiers::OCL_Strong: {
      bool precise = VD && VD->hasAttr<ObjCPreciseLifetimeAttr>();
      CleanupKind cleanupKind = getARCCleanupKind();
      // This local is a GCC and MSVC compiler workaround.
      Destroyer *destroyer = precise ? &destroyARCStrongPrecise :
                                       &destroyARCStrongImprecise;
      pushDestroy(cleanupKind, ReferenceTemporary, ObjCARCReferenceLifetimeType,
                  *destroyer, cleanupKind & EHCleanup);
      break;
    }

    case Qualifiers::OCL_Weak: {
      // This local is a GCC and MSVC compiler workaround.
      Destroyer *destroyer = &destroyARCWeak;
      // __weak objects always get EH cleanups; otherwise, exceptions
      // could cause really nasty crashes instead of mere leaks.
      pushDestroy(NormalAndEHCleanup, ReferenceTemporary,
                  ObjCARCReferenceLifetimeType, *destroyer, true);
      break;
    }
    }
  }

  return RValue::get(Value);
}


/// getAccessedFieldNo - Given an encoded value and a result number, return the
/// input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  if (isa<llvm::ConstantAggregateZero>(Elts))
    return 0;

  return cast<llvm::ConstantInt>(Elts->getOperand(Idx))->getZExtValue();
}

void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
  if (!CatchUndefined)
    return;

  // This needs to be to the standard address space.
  Address = Builder.CreateBitCast(Address, Int8PtrTy);

  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, IntPtrTy);

  // In time, people may want to control this and use a 1 here.
  llvm::Value *Arg = Builder.getFalse();
  llvm::Value *C = Builder.CreateCall2(F, Address, Arg);
  llvm::BasicBlock *Cont = createBasicBlock();
  llvm::BasicBlock *Check = createBasicBlock();
  llvm::Value *NegativeOne = llvm::ConstantInt::get(IntPtrTy, -1ULL);
  Builder.CreateCondBr(Builder.CreateICmpEQ(C, NegativeOne), Cont, Check);

  EmitBlock(Check);
  Builder.CreateCondBr(Builder.CreateICmpUGE(C,
                                        llvm::ConstantInt::get(IntPtrTy, Size)),
                       Cont, getTrapBB());
  EmitBlock(Cont);
}
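
// Illustrative sketch (editor's note) of the control flow EmitCheck builds,
// assuming -fcatch-undefined-behavior and a check of Size bytes:
//
//   %sz = call i{N} @llvm.objectsize.i{N}(i8* %addr, i1 false)
//   ; %sz == -1 means "unknown size", so skip the check entirely
//   br i1 (%sz == -1), label %cont, label %check
// check:
//   br i1 (%sz uge Size), label %cont, label %trap
//
// Block and value names are made up for readability; i{N} stands for the
// target's intptr type.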

CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                         bool isInc, bool isPre) {
  ComplexPairTy InVal = LoadComplexFromAddr(LV.getAddress(),
                                            LV.isVolatileQualified());

  llvm::Value *NextVal;
  if (isa<llvm::IntegerType>(InVal.first->getType())) {
    uint64_t AmountVal = isInc ? 1 : -1;
    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  } else {
    QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
    llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
    if (!isInc)
      FVal.changeSign();
    NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  }

  ComplexPairTy IncVal(NextVal, InVal.second);

  // Store the updated result through the lvalue.
  StoreComplexToAddr(IncVal, LV.getAddress(), LV.isVolatileQualified());

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? IncVal : InVal;
}


//===----------------------------------------------------------------------===//
//                         LValue Expression Emission
//===----------------------------------------------------------------------===//

RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType())
    return RValue::get(0);

  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    llvm::Type *EltTy = ConvertType(CTy->getElementType());
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  }

  // If this is a use of an undefined aggregate type, the aggregate must have
  // an identifiable address.  Just because the contents of the value are
  // undefined doesn't mean that the address can't be taken and compared.
  if (hasAggregateLLVMType(Ty)) {
    llvm::Value *DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
    return RValue::getAggregate(DestPtr);
  }

  return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
}

RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}

LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
  return MakeAddrLValue(llvm::UndefValue::get(Ty), E->getType());
}

LValue CodeGenFunction::EmitCheckedLValue(const Expr *E) {
  LValue LV = EmitLValue(E);
  if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
    EmitCheck(LV.getAddress(),
              getContext().getTypeSizeInChars(E->getType()).getQuantity());
  return LV;
}

/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield
/// reference.  In either case, the LLVM Value* in the LValue structure is
/// guaranteed to be an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of the
/// LLVM value is known: For example, it may not be a pointer to an integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed size,
/// this method guarantees that the returned pointer type will point to an LLVM
/// type of the same size as the lvalue's type.  If the lvalue has a variable
/// length type, this is not possible.
///
LValue CodeGenFunction::EmitLValue(const Expr *E) {
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::ObjCSelectorExprClass:
    return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
  case Expr::ObjCIsaExprClass:
    return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CompoundAssignOperatorClass:
    if (!E->getType()->isAnyComplexType())
      return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
    return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ParenExprClass:
    return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
  case Expr::GenericSelectionExprClass:
    return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));

  case Expr::BlockDeclRefExprClass:
    return EmitBlockDeclRefLValue(cast<BlockDeclRefExpr>(E));

  case Expr::CXXTemporaryObjectExprClass:
  case Expr::CXXConstructExprClass:
    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
  case Expr::CXXBindTemporaryExprClass:
    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
  case Expr::ExprWithCleanupsClass:
    return EmitExprWithCleanupsLValue(cast<ExprWithCleanups>(E));
  case Expr::CXXScalarValueInitExprClass:
    return EmitNullInitializationLValue(cast<CXXScalarValueInitExpr>(E));
  case Expr::CXXDefaultArgExprClass:
    return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
  case Expr::CXXTypeidExprClass:
    return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));

  case Expr::ObjCMessageExprClass:
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
  case Expr::ObjCIvarRefExprClass:
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
  case Expr::ObjCPropertyRefExprClass:
    return EmitObjCPropertyRefLValue(cast<ObjCPropertyRefExpr>(E));
  case Expr::StmtExprClass:
    return EmitStmtExprLValue(cast<StmtExpr>(E));
  case Expr::UnaryOperatorClass:
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
  case Expr::ArraySubscriptExprClass:
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
  case Expr::ExtVectorElementExprClass:
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
  case Expr::MemberExprClass:
    return EmitMemberExpr(cast<MemberExpr>(E));
  case Expr::CompoundLiteralExprClass:
    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
  case Expr::BinaryConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
  case Expr::OpaqueValueExprClass:
    return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
  case Expr::SubstNonTypeTemplateParmExprClass:
    return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
  case Expr::ObjCBridgedCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));

  case Expr::MaterializeTemporaryExprClass:
    return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
  }
}

llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue) {
  return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
                          lvalue.getAlignment(), lvalue.getType(),
                          lvalue.getTBAAInfo());
}

llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
                                               unsigned Alignment, QualType Ty,
                                               llvm::MDNode *TBAAInfo) {
  llvm::LoadInst *Load = Builder.CreateLoad(Addr);
  if (Volatile)
    Load->setVolatile(true);
  if (Alignment)
    Load->setAlignment(Alignment);
  if (TBAAInfo)
    CGM.DecorateInstruction(Load, TBAAInfo);

  return EmitFromMemory(Load, Ty);
}

static bool isBooleanUnderlyingType(QualType Ty) {
  if (const EnumType *ET = dyn_cast<EnumType>(Ty))
    return ET->getDecl()->getIntegerType()->isBooleanType();
  return false;
}

llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
  if (Ty->isBooleanType() || isBooleanUnderlyingType(Ty)) {
    // This should really always be an i1, but sometimes it's already
    // an i8, and it's awkward to track those cases down.
    if (Value->getType()->isIntegerTy(1))
      return Builder.CreateZExt(Value, Builder.getInt8Ty(), "frombool");
    assert(Value->getType()->isIntegerTy(8) && "value rep of bool not i1/i8");
  }

  return Value;
}

llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
  if (Ty->isBooleanType() || isBooleanUnderlyingType(Ty)) {
    assert(Value->getType()->isIntegerTy(8) && "memory rep of bool not i8");
    return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
  }

  return Value;
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
                                        bool Volatile, unsigned Alignment,
                                        QualType Ty,
                                        llvm::MDNode *TBAAInfo) {
  Value = EmitToMemory(Value, Ty);

  llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
  if (Alignment)
    Store->setAlignment(Alignment);
  if (TBAAInfo)
    CGM.DecorateInstruction(Store, TBAAInfo);
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue) {
  EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
                    lvalue.getAlignment(), lvalue.getType(),
                    lvalue.getTBAAInfo());
}
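
// Illustrative example (editor's note): EmitToMemory/EmitFromMemory exist
// because bool is i1 in registers but i8 in memory.  For C code like
//
//   _Bool b = x > 0;
//
// the comparison yields an i1, EmitToMemory widens it with "zext ... to i8"
// (named "frombool") before the store, and a later load runs through
// EmitFromMemory, which truncates back to i1 (named "tobool").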

/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV) {
  if (LV.isObjCWeak()) {
    // load of a __weak object.
    llvm::Value *AddrWeakObj = LV.getAddress();
    return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                             AddrWeakObj));
  }
  if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak)
    return RValue::get(EmitARCLoadWeak(LV.getAddress()));

  if (LV.isSimple()) {
    assert(!LV.getType()->isFunctionType());

    // Everything needs a load.
    return RValue::get(EmitLoadOfScalar(LV));
  }

  if (LV.isVectorElt()) {
    llvm::Value *Vec = Builder.CreateLoad(LV.getVectorAddr(),
                                          LV.isVolatileQualified());
    return RValue::get(Builder.CreateExtractElement(Vec, LV.getVectorIdx(),
                                                    "vecext"));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
  if (LV.isExtVectorElt())
    return EmitLoadOfExtVectorElementLValue(LV);

  if (LV.isBitField())
    return EmitLoadOfBitfieldLValue(LV);

  assert(LV.isPropertyRef() && "Unknown LValue type!");
  return EmitLoadOfPropertyRefLValue(LV);
}

RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
  const CGBitFieldInfo &Info = LV.getBitFieldInfo();

  // Get the output type.
  llvm::Type *ResLTy = ConvertType(LV.getType());
  unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);

  // Compute the result as an OR of all of the individual component accesses.
  llvm::Value *Res = 0;
  for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
    const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

    // Get the field pointer.
    llvm::Value *Ptr = LV.getBitFieldBaseAddr();

    // Only offset by the field index if used, so that incoming values are not
    // required to be structures.
    if (AI.FieldIndex)
      Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");

    // Offset by the byte offset, if used.
    if (!AI.FieldByteOffset.isZero()) {
      Ptr = EmitCastToVoidPtr(Ptr);
      Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
                                       "bf.field.offs");
    }

    // Cast to the access type.
    llvm::Type *PTy = llvm::Type::getIntNPtrTy(getLLVMContext(),
                                               AI.AccessWidth,
                          CGM.getContext().getTargetAddressSpace(LV.getType()));
    Ptr = Builder.CreateBitCast(Ptr, PTy);

    // Perform the load.
    llvm::LoadInst *Load = Builder.CreateLoad(Ptr, LV.isVolatileQualified());
    if (!AI.AccessAlignment.isZero())
      Load->setAlignment(AI.AccessAlignment.getQuantity());

    // Shift out unused low bits and mask out unused high bits.
    llvm::Value *Val = Load;
    if (AI.FieldBitStart)
      Val = Builder.CreateLShr(Load, AI.FieldBitStart);
    Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(AI.AccessWidth,
                                                            AI.TargetBitWidth),
                            "bf.clear");

    // Extend or truncate to the target size.
    if (AI.AccessWidth < ResSizeInBits)
      Val = Builder.CreateZExt(Val, ResLTy);
    else if (AI.AccessWidth > ResSizeInBits)
      Val = Builder.CreateTrunc(Val, ResLTy);

    // Shift into place, and OR into the result.
    if (AI.TargetBitOffset)
      Val = Builder.CreateShl(Val, AI.TargetBitOffset);
    Res = Res ? Builder.CreateOr(Res, Val) : Val;
  }

  // If the bit-field is signed, perform the sign-extension.
  //
  // FIXME: This can easily be folded into the load of the high bits, which
  // could also eliminate the mask of high bits in some situations.
  if (Info.isSigned()) {
    unsigned ExtraBits = ResSizeInBits - Info.getSize();
    if (ExtraBits)
      Res = Builder.CreateAShr(Builder.CreateShl(Res, ExtraBits),
                               ExtraBits, "bf.val.sext");
  }

  return RValue::get(Res);
}

// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
  llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddr(),
                                        LV.isVolatileQualified());

  const llvm::Constant *Elts = LV.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be
  // extracting a single element.  Just codegen as an extractelement.
  const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
  if (!ExprVT) {
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
    return RValue::get(Builder.CreateExtractElement(Vec, Elt));
  }

  // Always use shuffle vector to try to retain the original program structure.
  unsigned NumResultElts = ExprVT->getNumElements();

  SmallVector<llvm::Constant*, 4> Mask;
  for (unsigned i = 0; i != NumResultElts; ++i) {
    unsigned InIdx = getAccessedFieldNo(i, Elts);
    Mask.push_back(llvm::ConstantInt::get(Int32Ty, InIdx));
  }

  llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
  Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
                                    MaskV);
  return RValue::get(Vec);
}
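
// Illustrative example (editor's note): for an OpenCL/ext_vector_type swizzle
// read such as (assuming a float4/float2 typedef)
//
//   float4 v; float2 lo = v.xy;
//
// the load above becomes, roughly:
//
//   %v  = load <4 x float>* %addr
//   %lo = shufflevector <4 x float> %v, <4 x float> undef,
//                       <2 x i32> <i32 0, i32 1>
//
// while a single-element access like v.x is just an extractelement.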

/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst) {
  if (!Dst.isSimple()) {
    if (Dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element.
      llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddr(),
                                            Dst.isVolatileQualified());
      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
                                        Dst.getVectorIdx(), "vecins");
      Builder.CreateStore(Vec, Dst.getVectorAddr(), Dst.isVolatileQualified());
      return;
    }

    // If this is an update of extended vector elements, insert them as
    // appropriate.
    if (Dst.isExtVectorElt())
      return EmitStoreThroughExtVectorComponentLValue(Src, Dst);

    if (Dst.isBitField())
      return EmitStoreThroughBitfieldLValue(Src, Dst);

    assert(Dst.isPropertyRef() && "Unknown LValue type");
    return EmitStoreThroughPropertyRefLValue(Src, Dst);
  }

  // There's special magic for assigning into an ARC-qualified l-value.
  if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
      llvm_unreachable("present but none");

    case Qualifiers::OCL_ExplicitNone:
      // nothing special
      break;

    case Qualifiers::OCL_Strong:
      EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Weak:
      EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Autoreleasing:
      Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
                                                     Src.getScalarVal()));
      // fall into the normal path
      break;
    }
  }

  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // Assignment to a __weak object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
    return;
  }

  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // Assignment to a __strong object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    if (Dst.isObjCIvar()) {
      assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
      llvm::Type *ResultType = ConvertType(getContext().LongTy);
      llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
      llvm::Value *dst = RHS;
      RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
      llvm::Value *LHS =
        Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
      llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
      CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
                                              BytesBetween);
    } else if (Dst.isGlobalObjCRef()) {
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
                                                Dst.isThreadLocalRef());
    } else
      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
    return;
  }

  assert(Src.isScalar() && "Can't emit an agg store with this method");
  EmitStoreOfScalar(Src.getScalarVal(), Dst);
}
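
// Illustrative example (editor's note): under ARC, an assignment like
//
//   __weak id w = obj;   // w is an l-value of __weak lifetime
//
// does not go through a plain store; EmitStoreThroughLValue above routes it
// to EmitARCStoreWeak (and __strong stores to EmitARCStoreStrong), which call
// into the Objective-C runtime instead of emitting a raw store instruction.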

void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                                     llvm::Value **Result) {
  const CGBitFieldInfo &Info = Dst.getBitFieldInfo();

  // Get the output type.
  llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
  unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);

  // Get the source value, truncated to the width of the bit-field.
  llvm::Value *SrcVal = Src.getScalarVal();

  if (Dst.getType()->isBooleanType())
    SrcVal = Builder.CreateIntCast(SrcVal, ResLTy, /*IsSigned=*/false);

  SrcVal = Builder.CreateAnd(SrcVal, llvm::APInt::getLowBitsSet(ResSizeInBits,
                                                                Info.getSize()),
                             "bf.value");

  // Return the new value of the bit-field, if requested.
  if (Result) {
    // Cast back to the proper type for result.
    llvm::Type *SrcTy = Src.getScalarVal()->getType();
    llvm::Value *ReloadVal = Builder.CreateIntCast(SrcVal, SrcTy, false,
                                                   "bf.reload.val");

    // Sign extend if necessary.
    if (Info.isSigned()) {
      unsigned ExtraBits = ResSizeInBits - Info.getSize();
      if (ExtraBits)
        ReloadVal = Builder.CreateAShr(Builder.CreateShl(ReloadVal, ExtraBits),
                                       ExtraBits, "bf.reload.sext");
    }

    *Result = ReloadVal;
  }

  // Iterate over the components, writing each piece to memory.
  for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
    const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

    // Get the field pointer.
    llvm::Value *Ptr = Dst.getBitFieldBaseAddr();
    unsigned addressSpace =
      cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();

    // Only offset by the field index if used, so that incoming values are not
    // required to be structures.
    if (AI.FieldIndex)
      Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");

    // Offset by the byte offset, if used.
    if (!AI.FieldByteOffset.isZero()) {
      Ptr = EmitCastToVoidPtr(Ptr);
      Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
                                       "bf.field.offs");
    }

    // Cast to the access type.
    llvm::Type *AccessLTy =
      llvm::Type::getIntNTy(getLLVMContext(), AI.AccessWidth);

    llvm::Type *PTy = AccessLTy->getPointerTo(addressSpace);
    Ptr = Builder.CreateBitCast(Ptr, PTy);

    // Extract the piece of the bit-field value to write in this access,
    // limited to the values that are part of this access.
    llvm::Value *Val = SrcVal;
    if (AI.TargetBitOffset)
      Val = Builder.CreateLShr(Val, AI.TargetBitOffset);
    Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(ResSizeInBits,
                                                            AI.TargetBitWidth));

    // Extend or truncate to the access size.
    if (ResSizeInBits < AI.AccessWidth)
      Val = Builder.CreateZExt(Val, AccessLTy);
    else if (ResSizeInBits > AI.AccessWidth)
      Val = Builder.CreateTrunc(Val, AccessLTy);

    // Shift into the position in memory.
    if (AI.FieldBitStart)
      Val = Builder.CreateShl(Val, AI.FieldBitStart);

    // If necessary, load and OR in bits that are outside of the bit-field.
    if (AI.TargetBitWidth != AI.AccessWidth) {
      llvm::LoadInst *Load = Builder.CreateLoad(Ptr, Dst.isVolatileQualified());
      if (!AI.AccessAlignment.isZero())
        Load->setAlignment(AI.AccessAlignment.getQuantity());

      // Compute the mask for zeroing the bits that are part of the bit-field.
      llvm::APInt InvMask =
        ~llvm::APInt::getBitsSet(AI.AccessWidth, AI.FieldBitStart,
                                 AI.FieldBitStart + AI.TargetBitWidth);

      // Apply the mask and OR in to the value to write.
      Val = Builder.CreateOr(Builder.CreateAnd(Load, InvMask), Val);
    }

    // Write the value.
    llvm::StoreInst *Store = Builder.CreateStore(Val, Ptr,
                                                 Dst.isVolatileQualified());
    if (!AI.AccessAlignment.isZero())
      Store->setAlignment(AI.AccessAlignment.getQuantity());
  }
}
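
// Illustrative example (editor's note): for a bit-field write such as
//
//   struct S { unsigned a : 3, b : 5; } s;
//   s.b = v;
//
// the loop above emits a read/modify/write on the containing access unit,
// roughly: load the unit, clear b's bits with an inverted mask, shift the
// (truncated) new value into position, OR the two, and store the result back.
// Volatile and alignment attributes are propagated onto the load and store.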

void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
                                                               LValue Dst) {
  // This access turns into a read/modify/write of the vector.  Load the input
  // value now.
  llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddr(),
                                        Dst.isVolatileQualified());
  const llvm::Constant *Elts = Dst.getExtVectorElts();

  llvm::Value *SrcVal = Src.getScalarVal();

  if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
    unsigned NumSrcElts = VTy->getNumElements();
    unsigned NumDstElts =
      cast<llvm::VectorType>(Vec->getType())->getNumElements();
    if (NumDstElts == NumSrcElts) {
      // Use shufflevector if the source and destination have the same number
      // of elements, inverting the access mask since it is expressed in terms
      // of the destination side of the store.
      SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
      for (unsigned i = 0; i != NumSrcElts; ++i) {
        unsigned InIdx = getAccessedFieldNo(i, Elts);
        Mask[InIdx] = llvm::ConstantInt::get(Int32Ty, i);
      }

      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      Vec = Builder.CreateShuffleVector(SrcVal,
                                        llvm::UndefValue::get(Vec->getType()),
                                        MaskV);
    } else if (NumDstElts > NumSrcElts) {
      // Extend the source vector to the same length and then shuffle it
      // into the destination.
      // FIXME: since we're shuffling with undef, can we just use the indices
      // into that?  This could be simpler.
      SmallVector<llvm::Constant*, 4> ExtMask;
      unsigned i;
      for (i = 0; i != NumSrcElts; ++i)
        ExtMask.push_back(llvm::ConstantInt::get(Int32Ty, i));
      for (; i != NumDstElts; ++i)
        ExtMask.push_back(llvm::UndefValue::get(Int32Ty));
      llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask);
      llvm::Value *ExtSrcVal =
        Builder.CreateShuffleVector(SrcVal,
                                    llvm::UndefValue::get(SrcVal->getType()),
                                    ExtMaskV);
      // Build the identity mask.
      SmallVector<llvm::Constant*, 4> Mask;
      for (unsigned i = 0; i != NumDstElts; ++i)
        Mask.push_back(llvm::ConstantInt::get(Int32Ty, i));

      // Overwrite the entries that get shuffled in from the source.
      for (unsigned i = 0; i != NumSrcElts; ++i) {
        unsigned Idx = getAccessedFieldNo(i, Elts);
        Mask[Idx] = llvm::ConstantInt::get(Int32Ty, i+NumDstElts);
      }
      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV);
    } else {
      // We should never shorten the vector.
      llvm_unreachable("unexpected shorten vector length");
    }
  } else {
    // If the Src is a scalar (not a vector), it must be updating one element.
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
    Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
  }

  Builder.CreateStore(Vec, Dst.getExtVectorAddr(), Dst.isVolatileQualified());
}
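
// Illustrative example (editor's note): a swizzle store like (assuming
// float4/float2 typedefs)
//
//   float4 v; v.yx = w;   // w is a float2
//
// takes the NumDstElts > NumSrcElts path above: w is widened to four lanes
// with an undef-extending shuffle, then shuffled into the loaded value of v
// so that lanes 1 and 0 come from w, and the merged vector is stored back.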

// setObjCGCLValueClass - sets the class of the lvalue for the purpose of
// generating the write-barrier API.  It is currently a global, ivar,
// or neither.
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
                                 LValue &LV,
                                 bool IsMemberAccess=false) {
  if (Ctx.getLangOptions().getGC() == LangOptions::NonGC)
    return;

  if (isa<ObjCIvarRefExpr>(E)) {
    QualType ExpTy = E->getType();
    if (IsMemberAccess && ExpTy->isPointerType()) {
      // If the ivar is a structure pointer, assigning to a field of
      // this struct follows gcc's behavior and conservatively makes it a
      // non-ivar write-barrier.
      ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType()) {
        LV.setObjCIvar(false);
        return;
      }
    }
    LV.setObjCIvar(true);
    ObjCIvarRefExpr *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr*>(E));
    LV.setBaseIvarExp(Exp->getBase());
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
      if (VD->hasGlobalStorage()) {
        LV.setGlobalObjCRef(true);
        LV.setThreadLocalRef(VD->isThreadSpecified());
      }
    }
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    if (LV.isObjCIvar()) {
      // If cast is to a structure pointer, follow gcc's behavior and make it
      // a non-ivar write-barrier.
      QualType ExpTy = E->getType();
      if (ExpTy->isPointerType())
        ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType())
        LV.setObjCIvar(false);
    }
    return;
  }

  if (const GenericSelectionExpr *Exp = dyn_cast<GenericSelectionExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
    return;
  }

  if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const ObjCBridgedCastExpr *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
    if (LV.isObjCIvar() && !LV.isObjCArray())
      // Using array syntax to assign to what an ivar points to is not
      // the same as assigning to the ivar itself.  {id *Names;} Names[i] = 0;
      LV.setObjCIvar(false);
    else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
      // Using array syntax to assign to what a global points to is not
      // the same as assigning to the global itself.  {id *G;} G[i] = 0;
      LV.setGlobalObjCRef(false);
    return;
  }

  if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
    // We don't know if the member is an 'ivar', but this flag is looked at
    // only in the context of LV.isObjCIvar().
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }
}

static llvm::Value *
EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
                                llvm::Value *V, llvm::Type *IRType,
                                StringRef Name = StringRef()) {
  unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
  return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
}

static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
                                      const Expr *E, const VarDecl *VD) {
  assert((VD->hasExternalStorage() || VD->isFileVarDecl()) &&
         "Var decl must have external storage or be a file var decl!");

  llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
  if (VD->getType()->isReferenceType())
    V = CGF.Builder.CreateLoad(V);

  V = EmitBitCastOfLValueToProperType(CGF, V,
                                CGF.getTypes().ConvertTypeForMem(E->getType()));

  unsigned Alignment = CGF.getContext().getDeclAlign(VD).getQuantity();
  LValue LV = CGF.MakeAddrLValue(V, E->getType(), Alignment);
  setObjCGCLValueClass(CGF.getContext(), E, LV);
  return LV;
}

static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
                                     const Expr *E, const FunctionDecl *FD) {
  llvm::Value *V = CGF.CGM.GetAddrOfFunction(FD);
  if (!FD->hasPrototype()) {
    if (const FunctionProtoType *Proto =
            FD->getType()->getAs<FunctionProtoType>()) {
      // Ugly case: for a K&R-style definition, the type of the definition
      // isn't the same as the type of a use.  Correct for this with a
      // bitcast.
      QualType NoProtoType =
          CGF.getContext().getFunctionNoProtoType(Proto->getResultType());
      NoProtoType = CGF.getContext().getPointerType(NoProtoType);
      V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType));
    }
  }
  unsigned Alignment = CGF.getContext().getDeclAlign(FD).getQuantity();
  return CGF.MakeAddrLValue(V, E->getType(), Alignment);
}

LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
  const NamedDecl *ND = E->getDecl();
  unsigned Alignment = getContext().getDeclAlign(ND).getQuantity();

  if (ND->hasAttr<WeakRefAttr>()) {
    const ValueDecl *VD = cast<ValueDecl>(ND);
    llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD);
    return MakeAddrLValue(Aliasee, E->getType(), Alignment);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {

    // Check if this is a global variable.
    if (VD->hasExternalStorage() || VD->isFileVarDecl())
      return EmitGlobalVarDeclLValue(*this, E, VD);

    bool NonGCable = VD->hasLocalStorage() &&
                     !VD->getType()->isReferenceType() &&
                     !VD->hasAttr<BlocksAttr>();

    llvm::Value *V = LocalDeclMap[VD];
    if (!V && VD->isStaticLocal())
      V = CGM.getStaticLocalDeclAddress(VD);
    assert(V && "DeclRefExpr not entered in LocalDeclMap?");

    if (VD->hasAttr<BlocksAttr>())
      V = BuildBlockByrefAddress(V, VD);

    if (VD->getType()->isReferenceType())
      V = Builder.CreateLoad(V);

    V = EmitBitCastOfLValueToProperType(*this, V,
                                    getTypes().ConvertTypeForMem(E->getType()));

    LValue LV = MakeAddrLValue(V, E->getType(), Alignment);
    if (NonGCable) {
      LV.getQuals().removeObjCGCAttr();
      LV.setNonGC(true);
    }
    setObjCGCLValueClass(getContext(), E, LV);
    return LV;
  }

  if (const FunctionDecl *fn = dyn_cast<FunctionDecl>(ND))
    return EmitFunctionDeclLValue(*this, E, fn);

  llvm_unreachable("Unhandled DeclRefExpr");

  // An invalid LValue; the unreachable above
  // ensures that this point is never reached.
  return LValue();
}

LValue CodeGenFunction::EmitBlockDeclRefLValue(const BlockDeclRefExpr *E) {
  unsigned Alignment =
    getContext().getDeclAlign(E->getDecl()).getQuantity();
  return MakeAddrLValue(GetAddrOfBlockDecl(E), E->getType(), Alignment);
}

LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
  // __extension__ doesn't affect lvalue-ness.
  if (E->getOpcode() == UO_Extension)
    return EmitLValue(E->getSubExpr());

  QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
  switch (E->getOpcode()) {
  default: llvm_unreachable("Unknown unary operator lvalue!");
  case UO_Deref: {
    QualType T = E->getSubExpr()->getType()->getPointeeType();
    assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");

    LValue LV = MakeAddrLValue(EmitScalarExpr(E->getSubExpr()), T);
    LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());

    // We should not generate a __weak write barrier on an indirect reference
    // of a pointer to object; as in void foo (__weak id *param); *param = 0;
    // But, we continue to generate a __strong write barrier on indirect write
    // into a pointer to object.
    if (getContext().getLangOptions().ObjC1 &&
        getContext().getLangOptions().getGC() != LangOptions::NonGC &&
        LV.isObjCWeak())
      LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
    return LV;
  }
  case UO_Real:
  case UO_Imag: {
    LValue LV = EmitLValue(E->getSubExpr());
    assert(LV.isSimple() && "real/imag on non-ordinary l-value");
    llvm::Value *Addr = LV.getAddress();

    // real and imag are valid on scalars.  This is a faster way of
    // testing that.
    if (!cast<llvm::PointerType>(Addr->getType())
           ->getElementType()->isStructTy()) {
      assert(E->getSubExpr()->getType()->isArithmeticType());
      return LV;
    }

    assert(E->getSubExpr()->getType()->isAnyComplexType());

    unsigned Idx = E->getOpcode() == UO_Imag;
    return MakeAddrLValue(Builder.CreateStructGEP(LV.getAddress(),
                                                  Idx, "idx"),
                          ExprTy);
  }
  case UO_PreInc:
  case UO_PreDec: {
    LValue LV = EmitLValue(E->getSubExpr());
    bool isInc = E->getOpcode() == UO_PreInc;

    if (E->getType()->isAnyComplexType())
      EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
    else
      EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
    return LV;
  }
  }
}

LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
                        E->getType());
}

LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
                        E->getType());
}


LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
  switch (E->getIdentType()) {
  default:
    return EmitUnsupportedLValue(E, "predefined expression");

  case PredefinedExpr::Func:
  case PredefinedExpr::Function:
  case PredefinedExpr::PrettyFunction: {
    unsigned Type = E->getIdentType();
    std::string GlobalVarName;

    switch (Type) {
    default: llvm_unreachable("Invalid type");
    case PredefinedExpr::Func:
      GlobalVarName = "__func__.";
      break;
    case PredefinedExpr::Function:
      GlobalVarName = "__FUNCTION__.";
      break;
    case PredefinedExpr::PrettyFunction:
      GlobalVarName = "__PRETTY_FUNCTION__.";
      break;
    }

    StringRef FnName = CurFn->getName();
    if (FnName.startswith("\01"))
      FnName = FnName.substr(1);
    GlobalVarName += FnName;

    const Decl *CurDecl = CurCodeDecl;
    if (CurDecl == 0)
      CurDecl = getContext().getTranslationUnitDecl();

    std::string FunctionName =
        (isa<BlockDecl>(CurDecl)
         ? FnName.str()
         : PredefinedExpr::ComputeName((PredefinedExpr::IdentType)Type,
                                       CurDecl));

    llvm::Constant *C =
      CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str());
    return MakeAddrLValue(C, E->getType());
  }
  }
}
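
// Illustrative example (editor's note): inside
//
//   void f(int) { const char *p = __PRETTY_FUNCTION__; }
//
// the code above creates an internal constant C string global (named along
// the lines of "__PRETTY_FUNCTION__.f", modulo name mangling) holding
// "void f(int)", and the resulting lvalue is the address of that global.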

llvm::BasicBlock *CodeGenFunction::getTrapBB() {
  const CodeGenOptions &GCO = CGM.getCodeGenOpts();

  // If we are not optimizing, don't collapse all calls to trap in the function
  // to the same call; that way, in the debugger they can see which operation
  // did in fact fail.  If we are optimizing, we collapse all calls to trap
  // down to just one per function to save on code size.
  if (GCO.OptimizationLevel && TrapBB)
    return TrapBB;

  llvm::BasicBlock *Cont = 0;
  if (HaveInsertPoint()) {
    Cont = createBasicBlock("cont");
    EmitBranch(Cont);
  }
  TrapBB = createBasicBlock("trap");
  EmitBlock(TrapBB);

  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap);
  llvm::CallInst *TrapCall = Builder.CreateCall(F);
  TrapCall->setDoesNotReturn();
  TrapCall->setDoesNotThrow();
  Builder.CreateUnreachable();

  if (Cont)
    EmitBlock(Cont);
  return TrapBB;
}

/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
/// array to pointer, return the array subexpression.
static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
  // If this isn't just an array->pointer decay, bail out.
  const CastExpr *CE = dyn_cast<CastExpr>(E);
  if (CE == 0 || CE->getCastKind() != CK_ArrayToPointerDecay)
    return 0;

  // If this is a decay from variable width array, bail out.
  const Expr *SubExpr = CE->getSubExpr();
  if (SubExpr->getType()->isVariableArrayType())
    return 0;

  return SubExpr;
}
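
// Illustrative example (editor's note): isSimpleArrayDecayOperand feeds the
// A[i] fast path in EmitArraySubscriptExpr below.  For
//
//   int A[10]; ... A[i] ...
//
// instead of emitting a "gep [10 x i32]* @A, 0, 0" for the decay and then a
// separate "gep i32* %p, %i" for the subscript, codegen emits roughly one
// "getelementptr [10 x i32]* @A, 0, %i" (inbounds unless signed overflow is
// defined behavior).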
1630    llvm::Value *numElements = getVLASize(vla).first;
1631
1632    // Effectively, the multiply by the VLA size is part of the GEP.
1633    // GEP indexes are signed, and scaling an index isn't permitted to
1634    // signed-overflow, so we use the same semantics for our explicit
1635    // multiply. We suppress this if overflow is not undefined behavior.
1636    if (getLangOptions().isSignedOverflowDefined()) {
1637      Idx = Builder.CreateMul(Idx, numElements);
1638      Address = Builder.CreateGEP(Address, Idx, "arrayidx");
1639    } else {
1640      Idx = Builder.CreateNSWMul(Idx, numElements);
1641      Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx");
1642    }
1643  } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
1644    // Indexing over an interface, as in "NSString *P; P[4];"
1645    llvm::Value *InterfaceSize =
1646      llvm::ConstantInt::get(Idx->getType(),
1647          getContext().getTypeSizeInChars(OIT).getQuantity());
1648
1649    Idx = Builder.CreateMul(Idx, InterfaceSize);
1650
1651    // The base must be a pointer, which is not an aggregate. Emit it.
1652    llvm::Value *Base = EmitScalarExpr(E->getBase());
1653    Address = EmitCastToVoidPtr(Base);
1654    Address = Builder.CreateGEP(Address, Idx, "arrayidx");
1655    Address = Builder.CreateBitCast(Address, Base->getType());
1656  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
1657    // If this is A[i] where A is an array, the frontend will have decayed the
1658    // base to be an ArrayToPointerDecay implicit cast. While correct, it is
1659    // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
1660    // "gep x, i" here. Emit one "gep A, 0, i".
1661    assert(Array->getType()->isArrayType() &&
1662           "Array to pointer decay must have array source type!");
1663    LValue ArrayLV = EmitLValue(Array);
1664    llvm::Value *ArrayPtr = ArrayLV.getAddress();
1665    llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
1666    llvm::Value *Args[] = { Zero, Idx };
1667
1668    // Propagate the alignment from the array itself to the result.
1669    ArrayAlignment = ArrayLV.getAlignment();
1670
1671    if (getContext().getLangOptions().isSignedOverflowDefined())
1672      Address = Builder.CreateGEP(ArrayPtr, Args, "arrayidx");
1673    else
1674      Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx");
1675  } else {
1676    // The base must be a pointer, which is not an aggregate. Emit it.
1677    llvm::Value *Base = EmitScalarExpr(E->getBase());
1678    if (getContext().getLangOptions().isSignedOverflowDefined())
1679      Address = Builder.CreateGEP(Base, Idx, "arrayidx");
1680    else
1681      Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
1682  }
1683
1684  QualType T = E->getBase()->getType()->getPointeeType();
1685  assert(!T.isNull() &&
1686         "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");
1687
1688  // Limit the alignment to that of the result type.
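  // [Editor's note] For example, indexing a 16-byte-aligned 'char buf[64]'
  // should produce a 'char' l-value with alignment 1, not 16; hence the
  // std::min with the element type's alignment below.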
1689  if (ArrayAlignment) {
1690    unsigned Align = getContext().getTypeAlignInChars(T).getQuantity();
1691    ArrayAlignment = std::min(Align, ArrayAlignment);
1692  }
1693
1694  LValue LV = MakeAddrLValue(Address, T, ArrayAlignment);
1695  LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace());
1696
1697  if (getContext().getLangOptions().ObjC1 &&
1698      getContext().getLangOptions().getGC() != LangOptions::NonGC) {
1699    LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
1700    setObjCGCLValueClass(getContext(), E, LV);
1701  }
1702  return LV;
1703}
1704
1705static
1706llvm::Constant *GenerateConstantVector(llvm::LLVMContext &VMContext,
1707                                       SmallVector<unsigned, 4> &Elts) {
1708  SmallVector<llvm::Constant*, 4> CElts;
1709
1710  llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
1711  for (unsigned i = 0, e = Elts.size(); i != e; ++i)
1712    CElts.push_back(llvm::ConstantInt::get(Int32Ty, Elts[i]));
1713
1714  return llvm::ConstantVector::get(CElts);
1715}
1716
1717LValue CodeGenFunction::
1718EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
1719  // Emit the base vector as an l-value.
1720  LValue Base;
1721
1722  // ExtVectorElementExpr's base can either be a vector or pointer to vector.
1723  if (E->isArrow()) {
1724    // If it is a pointer to a vector, emit the address and form an lvalue with
1725    // it.
1726    llvm::Value *Ptr = EmitScalarExpr(E->getBase());
1727    const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
1728    Base = MakeAddrLValue(Ptr, PT->getPointeeType());
1729    Base.getQuals().removeObjCGCAttr();
1730  } else if (E->getBase()->isGLValue()) {
1731    // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
1732    // emit the base as an lvalue.
1733    assert(E->getBase()->getType()->isVectorType());
1734    Base = EmitLValue(E->getBase());
1735  } else {
1736    // Otherwise, the base is a normal rvalue (as in (V+V).x); emit it as such.
1737    assert(E->getBase()->getType()->isVectorType() &&
1738           "Result must be a vector");
1739    llvm::Value *Vec = EmitScalarExpr(E->getBase());
1740
1741    // Store the vector to memory (because LValue wants an address).
1742    llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType());
1743    Builder.CreateStore(Vec, VecMem);
1744    Base = MakeAddrLValue(VecMem, E->getBase()->getType());
1745  }
1746
1747  QualType type =
1748    E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
1749
1750  // Encode the element access list into a vector of unsigned indices.
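  // [Editor's illustration] For a swizzle such as 'V.yx' on a float4,
  // getEncodedElementAccess() fills Indices with {1, 0}; for a simple base
  // this becomes the constant mask <i32 1, i32 0> carried by the
  // ext-vector-element l-value built below.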
1751 SmallVector<unsigned, 4> Indices; 1752 E->getEncodedElementAccess(Indices); 1753 1754 if (Base.isSimple()) { 1755 llvm::Constant *CV = GenerateConstantVector(getLLVMContext(), Indices); 1756 return LValue::MakeExtVectorElt(Base.getAddress(), CV, type); 1757 } 1758 assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!"); 1759 1760 llvm::Constant *BaseElts = Base.getExtVectorElts(); 1761 SmallVector<llvm::Constant *, 4> CElts; 1762 1763 for (unsigned i = 0, e = Indices.size(); i != e; ++i) { 1764 if (isa<llvm::ConstantAggregateZero>(BaseElts)) 1765 CElts.push_back(llvm::ConstantInt::get(Int32Ty, 0)); 1766 else 1767 CElts.push_back(cast<llvm::Constant>(BaseElts->getOperand(Indices[i]))); 1768 } 1769 llvm::Constant *CV = llvm::ConstantVector::get(CElts); 1770 return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, type); 1771} 1772 1773LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) { 1774 bool isNonGC = false; 1775 Expr *BaseExpr = E->getBase(); 1776 llvm::Value *BaseValue = NULL; 1777 Qualifiers BaseQuals; 1778 1779 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar. 1780 if (E->isArrow()) { 1781 BaseValue = EmitScalarExpr(BaseExpr); 1782 const PointerType *PTy = 1783 BaseExpr->getType()->getAs<PointerType>(); 1784 BaseQuals = PTy->getPointeeType().getQualifiers(); 1785 } else { 1786 LValue BaseLV = EmitLValue(BaseExpr); 1787 if (BaseLV.isNonGC()) 1788 isNonGC = true; 1789 // FIXME: this isn't right for bitfields. 1790 BaseValue = BaseLV.getAddress(); 1791 QualType BaseTy = BaseExpr->getType(); 1792 BaseQuals = BaseTy.getQualifiers(); 1793 } 1794 1795 NamedDecl *ND = E->getMemberDecl(); 1796 if (FieldDecl *Field = dyn_cast<FieldDecl>(ND)) { 1797 LValue LV = EmitLValueForField(BaseValue, Field, 1798 BaseQuals.getCVRQualifiers()); 1799 LV.setNonGC(isNonGC); 1800 setObjCGCLValueClass(getContext(), E, LV); 1801 return LV; 1802 } 1803 1804 if (VarDecl *VD = dyn_cast<VarDecl>(ND)) 1805 return EmitGlobalVarDeclLValue(*this, E, VD); 1806 1807 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) 1808 return EmitFunctionDeclLValue(*this, E, FD); 1809 1810 llvm_unreachable("Unhandled member declaration!"); 1811} 1812 1813LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value *BaseValue, 1814 const FieldDecl *Field, 1815 unsigned CVRQualifiers) { 1816 const CGRecordLayout &RL = 1817 CGM.getTypes().getCGRecordLayout(Field->getParent()); 1818 const CGBitFieldInfo &Info = RL.getBitFieldInfo(Field); 1819 return LValue::MakeBitfield(BaseValue, Info, 1820 Field->getType().withCVRQualifiers(CVRQualifiers)); 1821} 1822 1823/// EmitLValueForAnonRecordField - Given that the field is a member of 1824/// an anonymous struct or union buried inside a record, and given 1825/// that the base value is a pointer to the enclosing record, derive 1826/// an lvalue for the ultimate field. 
1827LValue CodeGenFunction::EmitLValueForAnonRecordField(llvm::Value *BaseValue, 1828 const IndirectFieldDecl *Field, 1829 unsigned CVRQualifiers) { 1830 IndirectFieldDecl::chain_iterator I = Field->chain_begin(), 1831 IEnd = Field->chain_end(); 1832 while (true) { 1833 LValue LV = EmitLValueForField(BaseValue, cast<FieldDecl>(*I), 1834 CVRQualifiers); 1835 if (++I == IEnd) return LV; 1836 1837 assert(LV.isSimple()); 1838 BaseValue = LV.getAddress(); 1839 CVRQualifiers |= LV.getVRQualifiers(); 1840 } 1841} 1842 1843LValue CodeGenFunction::EmitLValueForField(llvm::Value *baseAddr, 1844 const FieldDecl *field, 1845 unsigned cvr) { 1846 if (field->isBitField()) 1847 return EmitLValueForBitfield(baseAddr, field, cvr); 1848 1849 const RecordDecl *rec = field->getParent(); 1850 QualType type = field->getType(); 1851 1852 bool mayAlias = rec->hasAttr<MayAliasAttr>(); 1853 1854 llvm::Value *addr = baseAddr; 1855 if (rec->isUnion()) { 1856 // For unions, there is no pointer adjustment. 1857 assert(!type->isReferenceType() && "union has reference member"); 1858 } else { 1859 // For structs, we GEP to the field that the record layout suggests. 1860 unsigned idx = CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field); 1861 addr = Builder.CreateStructGEP(addr, idx, field->getName()); 1862 1863 // If this is a reference field, load the reference right now. 1864 if (const ReferenceType *refType = type->getAs<ReferenceType>()) { 1865 llvm::LoadInst *load = Builder.CreateLoad(addr, "ref"); 1866 if (cvr & Qualifiers::Volatile) load->setVolatile(true); 1867 1868 if (CGM.shouldUseTBAA()) { 1869 llvm::MDNode *tbaa; 1870 if (mayAlias) 1871 tbaa = CGM.getTBAAInfo(getContext().CharTy); 1872 else 1873 tbaa = CGM.getTBAAInfo(type); 1874 CGM.DecorateInstruction(load, tbaa); 1875 } 1876 1877 addr = load; 1878 mayAlias = false; 1879 type = refType->getPointeeType(); 1880 cvr = 0; // qualifiers don't recursively apply to referencee 1881 } 1882 } 1883 1884 // Make sure that the address is pointing to the right type. This is critical 1885 // for both unions and structs. A union needs a bitcast, a struct element 1886 // will need a bitcast if the LLVM type laid out doesn't match the desired 1887 // type. 1888 addr = EmitBitCastOfLValueToProperType(*this, addr, 1889 CGM.getTypes().ConvertTypeForMem(type), 1890 field->getName()); 1891 1892 if (field->hasAttr<AnnotateAttr>()) 1893 addr = EmitFieldAnnotations(field, addr); 1894 1895 unsigned alignment = getContext().getDeclAlign(field).getQuantity(); 1896 LValue LV = MakeAddrLValue(addr, type, alignment); 1897 LV.getQuals().addCVRQualifiers(cvr); 1898 1899 // __weak attribute on a field is ignored. 1900 if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak) 1901 LV.getQuals().removeObjCGCAttr(); 1902 1903 // Fields of may_alias structs act like 'char' for TBAA purposes. 1904 // FIXME: this should get propagated down through anonymous structs 1905 // and unions. 
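  // [Editor's illustration] Given
  //   struct __attribute__((may_alias)) S { int i; };
  // an access to 's.i' is decorated with the TBAA node for 'char' rather
  // than 'int', so the optimizers must assume it may alias memory of any
  // type.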
1906 if (mayAlias && LV.getTBAAInfo()) 1907 LV.setTBAAInfo(CGM.getTBAAInfo(getContext().CharTy)); 1908 1909 return LV; 1910} 1911 1912LValue 1913CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value *BaseValue, 1914 const FieldDecl *Field, 1915 unsigned CVRQualifiers) { 1916 QualType FieldType = Field->getType(); 1917 1918 if (!FieldType->isReferenceType()) 1919 return EmitLValueForField(BaseValue, Field, CVRQualifiers); 1920 1921 const CGRecordLayout &RL = 1922 CGM.getTypes().getCGRecordLayout(Field->getParent()); 1923 unsigned idx = RL.getLLVMFieldNo(Field); 1924 llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx); 1925 assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs"); 1926 1927 1928 // Make sure that the address is pointing to the right type. This is critical 1929 // for both unions and structs. A union needs a bitcast, a struct element 1930 // will need a bitcast if the LLVM type laid out doesn't match the desired 1931 // type. 1932 llvm::Type *llvmType = ConvertTypeForMem(FieldType); 1933 unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace(); 1934 V = Builder.CreateBitCast(V, llvmType->getPointerTo(AS)); 1935 1936 unsigned Alignment = getContext().getDeclAlign(Field).getQuantity(); 1937 return MakeAddrLValue(V, FieldType, Alignment); 1938} 1939 1940LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){ 1941 llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral"); 1942 const Expr *InitExpr = E->getInitializer(); 1943 LValue Result = MakeAddrLValue(DeclPtr, E->getType()); 1944 1945 EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(), 1946 /*Init*/ true); 1947 1948 return Result; 1949} 1950 1951LValue CodeGenFunction:: 1952EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) { 1953 if (!expr->isGLValue()) { 1954 // ?: here should be an aggregate. 1955 assert((hasAggregateLLVMType(expr->getType()) && 1956 !expr->getType()->isAnyComplexType()) && 1957 "Unexpected conditional operator!"); 1958 return EmitAggExprToLValue(expr); 1959 } 1960 1961 const Expr *condExpr = expr->getCond(); 1962 bool CondExprBool; 1963 if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) { 1964 const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr(); 1965 if (!CondExprBool) std::swap(live, dead); 1966 1967 if (!ContainsLabel(dead)) 1968 return EmitLValue(live); 1969 } 1970 1971 OpaqueValueMapping binding(*this, expr); 1972 1973 llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true"); 1974 llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false"); 1975 llvm::BasicBlock *contBlock = createBasicBlock("cond.end"); 1976 1977 ConditionalEvaluation eval(*this); 1978 EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock); 1979 1980 // Any temporaries created here are conditional. 1981 EmitBlock(lhsBlock); 1982 eval.begin(*this); 1983 LValue lhs = EmitLValue(expr->getTrueExpr()); 1984 eval.end(*this); 1985 1986 if (!lhs.isSimple()) 1987 return EmitUnsupportedLValue(expr, "conditional operator"); 1988 1989 lhsBlock = Builder.GetInsertBlock(); 1990 Builder.CreateBr(contBlock); 1991 1992 // Any temporaries created here are conditional. 
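  // [Editor's sketch] After both arms are emitted, the l-value is the
  // address phi built at the join point below; e.g. for '(c ? x : y) = 0'
  // with int x, y:
  //   cond.end:
  //     %cond-lvalue = phi i32* [ %x, %cond.true ], [ %y, %cond.false ]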
1993 EmitBlock(rhsBlock); 1994 eval.begin(*this); 1995 LValue rhs = EmitLValue(expr->getFalseExpr()); 1996 eval.end(*this); 1997 if (!rhs.isSimple()) 1998 return EmitUnsupportedLValue(expr, "conditional operator"); 1999 rhsBlock = Builder.GetInsertBlock(); 2000 2001 EmitBlock(contBlock); 2002 2003 llvm::PHINode *phi = Builder.CreatePHI(lhs.getAddress()->getType(), 2, 2004 "cond-lvalue"); 2005 phi->addIncoming(lhs.getAddress(), lhsBlock); 2006 phi->addIncoming(rhs.getAddress(), rhsBlock); 2007 return MakeAddrLValue(phi, expr->getType()); 2008} 2009 2010/// EmitCastLValue - Casts are never lvalues unless that cast is a dynamic_cast. 2011/// If the cast is a dynamic_cast, we can have the usual lvalue result, 2012/// otherwise if a cast is needed by the code generator in an lvalue context, 2013/// then it must mean that we need the address of an aggregate in order to 2014/// access one of its fields. This can happen for all the reasons that casts 2015/// are permitted with aggregate result, including noop aggregate casts, and 2016/// cast from scalar to union. 2017LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { 2018 switch (E->getCastKind()) { 2019 case CK_ToVoid: 2020 return EmitUnsupportedLValue(E, "unexpected cast lvalue"); 2021 2022 case CK_Dependent: 2023 llvm_unreachable("dependent cast kind in IR gen!"); 2024 2025 case CK_GetObjCProperty: { 2026 LValue LV = EmitLValue(E->getSubExpr()); 2027 assert(LV.isPropertyRef()); 2028 RValue RV = EmitLoadOfPropertyRefLValue(LV); 2029 2030 // Property is an aggregate r-value. 2031 if (RV.isAggregate()) { 2032 return MakeAddrLValue(RV.getAggregateAddr(), E->getType()); 2033 } 2034 2035 // Implicit property returns an l-value. 2036 assert(RV.isScalar()); 2037 return MakeAddrLValue(RV.getScalarVal(), E->getSubExpr()->getType()); 2038 } 2039 2040 case CK_NoOp: 2041 case CK_LValueToRValue: 2042 if (!E->getSubExpr()->Classify(getContext()).isPRValue() 2043 || E->getType()->isRecordType()) 2044 return EmitLValue(E->getSubExpr()); 2045 // Fall through to synthesize a temporary. 2046 2047 case CK_BitCast: 2048 case CK_ArrayToPointerDecay: 2049 case CK_FunctionToPointerDecay: 2050 case CK_NullToMemberPointer: 2051 case CK_NullToPointer: 2052 case CK_IntegralToPointer: 2053 case CK_PointerToIntegral: 2054 case CK_PointerToBoolean: 2055 case CK_VectorSplat: 2056 case CK_IntegralCast: 2057 case CK_IntegralToBoolean: 2058 case CK_IntegralToFloating: 2059 case CK_FloatingToIntegral: 2060 case CK_FloatingToBoolean: 2061 case CK_FloatingCast: 2062 case CK_FloatingRealToComplex: 2063 case CK_FloatingComplexToReal: 2064 case CK_FloatingComplexToBoolean: 2065 case CK_FloatingComplexCast: 2066 case CK_FloatingComplexToIntegralComplex: 2067 case CK_IntegralRealToComplex: 2068 case CK_IntegralComplexToReal: 2069 case CK_IntegralComplexToBoolean: 2070 case CK_IntegralComplexCast: 2071 case CK_IntegralComplexToFloatingComplex: 2072 case CK_DerivedToBaseMemberPointer: 2073 case CK_BaseToDerivedMemberPointer: 2074 case CK_MemberPointerToBoolean: 2075 case CK_AnyPointerToBlockPointerCast: 2076 case CK_ARCProduceObject: 2077 case CK_ARCConsumeObject: 2078 case CK_ARCReclaimReturnedObject: 2079 case CK_ARCExtendBlockObject: { 2080 // These casts only produce lvalues when we're binding a reference to a 2081 // temporary realized from a (converted) pure rvalue. Emit the expression 2082 // as a value, copy it into a temporary, and return an lvalue referring to 2083 // that temporary. 
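  // [Editor's note] A reference binding such as 'const double &r = someInt;'
  // is the kind of case meant here: the converted rvalue is stored into the
  // "ref.temp" alloca created below and the reference binds to that address.
  // (The variable names in this example are illustrative only.)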
2084 llvm::Value *V = CreateMemTemp(E->getType(), "ref.temp"); 2085 EmitAnyExprToMem(E, V, E->getType().getQualifiers(), false); 2086 return MakeAddrLValue(V, E->getType()); 2087 } 2088 2089 case CK_Dynamic: { 2090 LValue LV = EmitLValue(E->getSubExpr()); 2091 llvm::Value *V = LV.getAddress(); 2092 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(E); 2093 return MakeAddrLValue(EmitDynamicCast(V, DCE), E->getType()); 2094 } 2095 2096 case CK_ConstructorConversion: 2097 case CK_UserDefinedConversion: 2098 case CK_CPointerToObjCPointerCast: 2099 case CK_BlockPointerToObjCPointerCast: 2100 return EmitLValue(E->getSubExpr()); 2101 2102 case CK_UncheckedDerivedToBase: 2103 case CK_DerivedToBase: { 2104 const RecordType *DerivedClassTy = 2105 E->getSubExpr()->getType()->getAs<RecordType>(); 2106 CXXRecordDecl *DerivedClassDecl = 2107 cast<CXXRecordDecl>(DerivedClassTy->getDecl()); 2108 2109 LValue LV = EmitLValue(E->getSubExpr()); 2110 llvm::Value *This = LV.getAddress(); 2111 2112 // Perform the derived-to-base conversion 2113 llvm::Value *Base = 2114 GetAddressOfBaseClass(This, DerivedClassDecl, 2115 E->path_begin(), E->path_end(), 2116 /*NullCheckValue=*/false); 2117 2118 return MakeAddrLValue(Base, E->getType()); 2119 } 2120 case CK_ToUnion: 2121 return EmitAggExprToLValue(E); 2122 case CK_BaseToDerived: { 2123 const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>(); 2124 CXXRecordDecl *DerivedClassDecl = 2125 cast<CXXRecordDecl>(DerivedClassTy->getDecl()); 2126 2127 LValue LV = EmitLValue(E->getSubExpr()); 2128 2129 // Perform the base-to-derived conversion 2130 llvm::Value *Derived = 2131 GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl, 2132 E->path_begin(), E->path_end(), 2133 /*NullCheckValue=*/false); 2134 2135 return MakeAddrLValue(Derived, E->getType()); 2136 } 2137 case CK_LValueBitCast: { 2138 // This must be a reinterpret_cast (or c-style equivalent). 
2139 const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E); 2140 2141 LValue LV = EmitLValue(E->getSubExpr()); 2142 llvm::Value *V = Builder.CreateBitCast(LV.getAddress(), 2143 ConvertType(CE->getTypeAsWritten())); 2144 return MakeAddrLValue(V, E->getType()); 2145 } 2146 case CK_ObjCObjectLValueCast: { 2147 LValue LV = EmitLValue(E->getSubExpr()); 2148 QualType ToType = getContext().getLValueReferenceType(E->getType()); 2149 llvm::Value *V = Builder.CreateBitCast(LV.getAddress(), 2150 ConvertType(ToType)); 2151 return MakeAddrLValue(V, E->getType()); 2152 } 2153 } 2154 2155 llvm_unreachable("Unhandled lvalue cast kind?"); 2156} 2157 2158LValue CodeGenFunction::EmitNullInitializationLValue( 2159 const CXXScalarValueInitExpr *E) { 2160 QualType Ty = E->getType(); 2161 LValue LV = MakeAddrLValue(CreateMemTemp(Ty), Ty); 2162 EmitNullInitialization(LV.getAddress(), Ty); 2163 return LV; 2164} 2165 2166LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) { 2167 assert(e->isGLValue() || e->getType()->isRecordType()); 2168 return getOpaqueLValueMapping(e); 2169} 2170 2171LValue CodeGenFunction::EmitMaterializeTemporaryExpr( 2172 const MaterializeTemporaryExpr *E) { 2173 RValue RV = EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0); 2174 return MakeAddrLValue(RV.getScalarVal(), E->getType()); 2175} 2176 2177 2178//===--------------------------------------------------------------------===// 2179// Expression Emission 2180//===--------------------------------------------------------------------===// 2181 2182RValue CodeGenFunction::EmitCallExpr(const CallExpr *E, 2183 ReturnValueSlot ReturnValue) { 2184 if (CGDebugInfo *DI = getDebugInfo()) 2185 DI->EmitLocation(Builder, E->getLocStart()); 2186 2187 // Builtins never have block type. 2188 if (E->getCallee()->getType()->isBlockPointerType()) 2189 return EmitBlockCallExpr(E, ReturnValue); 2190 2191 if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E)) 2192 return EmitCXXMemberCallExpr(CE, ReturnValue); 2193 2194 if (const CUDAKernelCallExpr *CE = dyn_cast<CUDAKernelCallExpr>(E)) 2195 return EmitCUDAKernelCallExpr(CE, ReturnValue); 2196 2197 const Decl *TargetDecl = E->getCalleeDecl(); 2198 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) { 2199 if (unsigned builtinID = FD->getBuiltinID()) 2200 return EmitBuiltinExpr(FD, builtinID, E); 2201 } 2202 2203 if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E)) 2204 if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl)) 2205 return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue); 2206 2207 if (const CXXPseudoDestructorExpr *PseudoDtor 2208 = dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) { 2209 QualType DestroyedType = PseudoDtor->getDestroyedType(); 2210 if (getContext().getLangOptions().ObjCAutoRefCount && 2211 DestroyedType->isObjCLifetimeType() && 2212 (DestroyedType.getObjCLifetime() == Qualifiers::OCL_Strong || 2213 DestroyedType.getObjCLifetime() == Qualifiers::OCL_Weak)) { 2214 // Automatic Reference Counting: 2215 // If the pseudo-expression names a retainable object with weak or 2216 // strong lifetime, the object shall be released. 2217 Expr *BaseExpr = PseudoDtor->getBase(); 2218 llvm::Value *BaseValue = NULL; 2219 Qualifiers BaseQuals; 2220 2221 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar. 
2222 if (PseudoDtor->isArrow()) { 2223 BaseValue = EmitScalarExpr(BaseExpr); 2224 const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>(); 2225 BaseQuals = PTy->getPointeeType().getQualifiers(); 2226 } else { 2227 LValue BaseLV = EmitLValue(BaseExpr); 2228 BaseValue = BaseLV.getAddress(); 2229 QualType BaseTy = BaseExpr->getType(); 2230 BaseQuals = BaseTy.getQualifiers(); 2231 } 2232 2233 switch (PseudoDtor->getDestroyedType().getObjCLifetime()) { 2234 case Qualifiers::OCL_None: 2235 case Qualifiers::OCL_ExplicitNone: 2236 case Qualifiers::OCL_Autoreleasing: 2237 break; 2238 2239 case Qualifiers::OCL_Strong: 2240 EmitARCRelease(Builder.CreateLoad(BaseValue, 2241 PseudoDtor->getDestroyedType().isVolatileQualified()), 2242 /*precise*/ true); 2243 break; 2244 2245 case Qualifiers::OCL_Weak: 2246 EmitARCDestroyWeak(BaseValue); 2247 break; 2248 } 2249 } else { 2250 // C++ [expr.pseudo]p1: 2251 // The result shall only be used as the operand for the function call 2252 // operator (), and the result of such a call has type void. The only 2253 // effect is the evaluation of the postfix-expression before the dot or 2254 // arrow. 2255 EmitScalarExpr(E->getCallee()); 2256 } 2257 2258 return RValue::get(0); 2259 } 2260 2261 llvm::Value *Callee = EmitScalarExpr(E->getCallee()); 2262 return EmitCall(E->getCallee()->getType(), Callee, ReturnValue, 2263 E->arg_begin(), E->arg_end(), TargetDecl); 2264} 2265 2266LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) { 2267 // Comma expressions just emit their LHS then their RHS as an l-value. 2268 if (E->getOpcode() == BO_Comma) { 2269 EmitIgnoredExpr(E->getLHS()); 2270 EnsureInsertPoint(); 2271 return EmitLValue(E->getRHS()); 2272 } 2273 2274 if (E->getOpcode() == BO_PtrMemD || 2275 E->getOpcode() == BO_PtrMemI) 2276 return EmitPointerToDataMemberBinaryExpr(E); 2277 2278 assert(E->getOpcode() == BO_Assign && "unexpected binary l-value"); 2279 2280 // Note that in all of these cases, __block variables need the RHS 2281 // evaluated first just in case the variable gets moved by the RHS. 2282 2283 if (!hasAggregateLLVMType(E->getType())) { 2284 switch (E->getLHS()->getType().getObjCLifetime()) { 2285 case Qualifiers::OCL_Strong: 2286 return EmitARCStoreStrong(E, /*ignored*/ false).first; 2287 2288 case Qualifiers::OCL_Autoreleasing: 2289 return EmitARCStoreAutoreleasing(E).first; 2290 2291 // No reason to do any of these differently. 2292 case Qualifiers::OCL_None: 2293 case Qualifiers::OCL_ExplicitNone: 2294 case Qualifiers::OCL_Weak: 2295 break; 2296 } 2297 2298 RValue RV = EmitAnyExpr(E->getRHS()); 2299 LValue LV = EmitLValue(E->getLHS()); 2300 EmitStoreThroughLValue(RV, LV); 2301 return LV; 2302 } 2303 2304 if (E->getType()->isAnyComplexType()) 2305 return EmitComplexAssignmentLValue(E); 2306 2307 return EmitAggExprToLValue(E); 2308} 2309 2310LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) { 2311 RValue RV = EmitCallExpr(E); 2312 2313 if (!RV.isScalar()) 2314 return MakeAddrLValue(RV.getAggregateAddr(), E->getType()); 2315 2316 assert(E->getCallReturnType()->isReferenceType() && 2317 "Can't have a scalar return unless the return type is a " 2318 "reference type!"); 2319 2320 return MakeAddrLValue(RV.getScalarVal(), E->getType()); 2321} 2322 2323LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) { 2324 // FIXME: This shouldn't require another copy. 
2325 return EmitAggExprToLValue(E); 2326} 2327 2328LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) { 2329 assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor() 2330 && "binding l-value to type which needs a temporary"); 2331 AggValueSlot Slot = CreateAggTemp(E->getType()); 2332 EmitCXXConstructExpr(E, Slot); 2333 return MakeAddrLValue(Slot.getAddr(), E->getType()); 2334} 2335 2336LValue 2337CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) { 2338 return MakeAddrLValue(EmitCXXTypeidExpr(E), E->getType()); 2339} 2340 2341LValue 2342CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) { 2343 AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue"); 2344 Slot.setExternallyDestructed(); 2345 EmitAggExpr(E->getSubExpr(), Slot); 2346 EmitCXXTemporary(E->getTemporary(), Slot.getAddr()); 2347 return MakeAddrLValue(Slot.getAddr(), E->getType()); 2348} 2349 2350LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) { 2351 RValue RV = EmitObjCMessageExpr(E); 2352 2353 if (!RV.isScalar()) 2354 return MakeAddrLValue(RV.getAggregateAddr(), E->getType()); 2355 2356 assert(E->getMethodDecl()->getResultType()->isReferenceType() && 2357 "Can't have a scalar return unless the return type is a " 2358 "reference type!"); 2359 2360 return MakeAddrLValue(RV.getScalarVal(), E->getType()); 2361} 2362 2363LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) { 2364 llvm::Value *V = 2365 CGM.getObjCRuntime().GetSelector(Builder, E->getSelector(), true); 2366 return MakeAddrLValue(V, E->getType()); 2367} 2368 2369llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface, 2370 const ObjCIvarDecl *Ivar) { 2371 return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar); 2372} 2373 2374LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy, 2375 llvm::Value *BaseValue, 2376 const ObjCIvarDecl *Ivar, 2377 unsigned CVRQualifiers) { 2378 return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue, 2379 Ivar, CVRQualifiers); 2380} 2381 2382LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) { 2383 // FIXME: A lot of the code below could be shared with EmitMemberExpr. 2384 llvm::Value *BaseValue = 0; 2385 const Expr *BaseExpr = E->getBase(); 2386 Qualifiers BaseQuals; 2387 QualType ObjectTy; 2388 if (E->isArrow()) { 2389 BaseValue = EmitScalarExpr(BaseExpr); 2390 ObjectTy = BaseExpr->getType()->getPointeeType(); 2391 BaseQuals = ObjectTy.getQualifiers(); 2392 } else { 2393 LValue BaseLV = EmitLValue(BaseExpr); 2394 // FIXME: this isn't right for bitfields. 2395 BaseValue = BaseLV.getAddress(); 2396 ObjectTy = BaseExpr->getType(); 2397 BaseQuals = ObjectTy.getQualifiers(); 2398 } 2399 2400 LValue LV = 2401 EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(), 2402 BaseQuals.getCVRQualifiers()); 2403 setObjCGCLValueClass(getContext(), E, LV); 2404 return LV; 2405} 2406 2407LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) { 2408 // Can only get l-value for message expression returning aggregate type 2409 RValue RV = EmitAnyExprToTemp(E); 2410 return MakeAddrLValue(RV.getAggregateAddr(), E->getType()); 2411} 2412 2413RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee, 2414 ReturnValueSlot ReturnValue, 2415 CallExpr::const_arg_iterator ArgBeg, 2416 CallExpr::const_arg_iterator ArgEnd, 2417 const Decl *TargetDecl) { 2418 // Get the actual function type. 
The callee type will always be a pointer to 2419 // function type or a block pointer type. 2420 assert(CalleeType->isFunctionPointerType() && 2421 "Call must have function pointer type!"); 2422 2423 CalleeType = getContext().getCanonicalType(CalleeType); 2424 2425 const FunctionType *FnType 2426 = cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType()); 2427 2428 CallArgList Args; 2429 EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd); 2430 2431 const CGFunctionInfo &FnInfo = CGM.getTypes().getFunctionInfo(Args, FnType); 2432 2433 // C99 6.5.2.2p6: 2434 // If the expression that denotes the called function has a type 2435 // that does not include a prototype, [the default argument 2436 // promotions are performed]. If the number of arguments does not 2437 // equal the number of parameters, the behavior is undefined. If 2438 // the function is defined with a type that includes a prototype, 2439 // and either the prototype ends with an ellipsis (, ...) or the 2440 // types of the arguments after promotion are not compatible with 2441 // the types of the parameters, the behavior is undefined. If the 2442 // function is defined with a type that does not include a 2443 // prototype, and the types of the arguments after promotion are 2444 // not compatible with those of the parameters after promotion, 2445 // the behavior is undefined [except in some trivial cases]. 2446 // That is, in the general case, we should assume that a call 2447 // through an unprototyped function type works like a *non-variadic* 2448 // call. The way we make this work is to cast to the exact type 2449 // of the promoted arguments. 2450 if (isa<FunctionNoProtoType>(FnType) && 2451 !getTargetHooks().isNoProtoCallVariadic(FnType->getCallConv())) { 2452 assert(cast<llvm::FunctionType>(Callee->getType()->getContainedType(0)) 2453 ->isVarArg()); 2454 llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo, false); 2455 CalleeTy = CalleeTy->getPointerTo(); 2456 Callee = Builder.CreateBitCast(Callee, CalleeTy, "callee.knr.cast"); 2457 } 2458 2459 return EmitCall(FnInfo, Callee, ReturnValue, Args, TargetDecl); 2460} 2461 2462LValue CodeGenFunction:: 2463EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) { 2464 llvm::Value *BaseV; 2465 if (E->getOpcode() == BO_PtrMemI) 2466 BaseV = EmitScalarExpr(E->getLHS()); 2467 else 2468 BaseV = EmitLValue(E->getLHS()).getAddress(); 2469 2470 llvm::Value *OffsetV = EmitScalarExpr(E->getRHS()); 2471 2472 const MemberPointerType *MPT 2473 = E->getRHS()->getType()->getAs<MemberPointerType>(); 2474 2475 llvm::Value *AddV = 2476 CGM.getCXXABI().EmitMemberDataPointerAddress(*this, BaseV, OffsetV, MPT); 2477 2478 return MakeAddrLValue(AddV, MPT->getPointeeType()); 2479} 2480 2481static void 2482EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest, 2483 llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2, 2484 uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) { 2485 if (E->isCmpXChg()) { 2486 // Note that cmpxchg only supports specifying one ordering and 2487 // doesn't support weak cmpxchg, at least at the moment. 
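    // [Editor's sketch of the sequence built below, for an i32 operand; the
    // actual ordering is whatever 'Order' selects]
    //   %expected = load i32* %Val1
    //   %desired  = load i32* %Val2
    //   %old      = cmpxchg i32* %Ptr, i32 %expected, i32 %desired seq_cst
    //   store i32 %old, i32* %Val1        ; write back the witnessed value
    //   %success  = icmp eq i32 %old, %expected
    // The i1 success flag is then stored to the result slot.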
2488 llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1); 2489 LoadVal1->setAlignment(Align); 2490 llvm::LoadInst *LoadVal2 = CGF.Builder.CreateLoad(Val2); 2491 LoadVal2->setAlignment(Align); 2492 llvm::AtomicCmpXchgInst *CXI = 2493 CGF.Builder.CreateAtomicCmpXchg(Ptr, LoadVal1, LoadVal2, Order); 2494 CXI->setVolatile(E->isVolatile()); 2495 llvm::StoreInst *StoreVal1 = CGF.Builder.CreateStore(CXI, Val1); 2496 StoreVal1->setAlignment(Align); 2497 llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(CXI, LoadVal1); 2498 CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType())); 2499 return; 2500 } 2501 2502 if (E->getOp() == AtomicExpr::Load) { 2503 llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr); 2504 Load->setAtomic(Order); 2505 Load->setAlignment(Size); 2506 Load->setVolatile(E->isVolatile()); 2507 llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest); 2508 StoreDest->setAlignment(Align); 2509 return; 2510 } 2511 2512 if (E->getOp() == AtomicExpr::Store) { 2513 assert(!Dest && "Store does not return a value"); 2514 llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1); 2515 LoadVal1->setAlignment(Align); 2516 llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr); 2517 Store->setAtomic(Order); 2518 Store->setAlignment(Size); 2519 Store->setVolatile(E->isVolatile()); 2520 return; 2521 } 2522 2523 llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add; 2524 switch (E->getOp()) { 2525 case AtomicExpr::CmpXchgWeak: 2526 case AtomicExpr::CmpXchgStrong: 2527 case AtomicExpr::Store: 2528 case AtomicExpr::Load: assert(0 && "Already handled!"); 2529 case AtomicExpr::Add: Op = llvm::AtomicRMWInst::Add; break; 2530 case AtomicExpr::Sub: Op = llvm::AtomicRMWInst::Sub; break; 2531 case AtomicExpr::And: Op = llvm::AtomicRMWInst::And; break; 2532 case AtomicExpr::Or: Op = llvm::AtomicRMWInst::Or; break; 2533 case AtomicExpr::Xor: Op = llvm::AtomicRMWInst::Xor; break; 2534 case AtomicExpr::Xchg: Op = llvm::AtomicRMWInst::Xchg; break; 2535 } 2536 llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1); 2537 LoadVal1->setAlignment(Align); 2538 llvm::AtomicRMWInst *RMWI = 2539 CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order); 2540 RMWI->setVolatile(E->isVolatile()); 2541 llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(RMWI, Dest); 2542 StoreDest->setAlignment(Align); 2543} 2544 2545// This function emits any expression (scalar, complex, or aggregate) 2546// into a temporary alloca. 
2547static llvm::Value * 2548EmitValToTemp(CodeGenFunction &CGF, Expr *E) { 2549 llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp"); 2550 CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(), 2551 /*Init*/ true); 2552 return DeclPtr; 2553} 2554 2555static RValue ConvertTempToRValue(CodeGenFunction &CGF, QualType Ty, 2556 llvm::Value *Dest) { 2557 if (Ty->isAnyComplexType()) 2558 return RValue::getComplex(CGF.LoadComplexFromAddr(Dest, false)); 2559 if (CGF.hasAggregateLLVMType(Ty)) 2560 return RValue::getAggregate(Dest); 2561 return RValue::get(CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(Dest, Ty))); 2562} 2563 2564RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) { 2565 QualType AtomicTy = E->getPtr()->getType()->getPointeeType(); 2566 QualType MemTy = AtomicTy->getAs<AtomicType>()->getValueType(); 2567 CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy); 2568 uint64_t Size = sizeChars.getQuantity(); 2569 CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy); 2570 unsigned Align = alignChars.getQuantity(); 2571 unsigned MaxInlineWidth = 2572 getContext().getTargetInfo().getMaxAtomicInlineWidth(); 2573 bool UseLibcall = (Size != Align || Size > MaxInlineWidth); 2574 2575 llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0; 2576 Ptr = EmitScalarExpr(E->getPtr()); 2577 Order = EmitScalarExpr(E->getOrder()); 2578 if (E->isCmpXChg()) { 2579 Val1 = EmitScalarExpr(E->getVal1()); 2580 Val2 = EmitValToTemp(*this, E->getVal2()); 2581 OrderFail = EmitScalarExpr(E->getOrderFail()); 2582 (void)OrderFail; // OrderFail is unused at the moment 2583 } else if ((E->getOp() == AtomicExpr::Add || E->getOp() == AtomicExpr::Sub) && 2584 MemTy->isPointerType()) { 2585 // For pointers, we're required to do a bit of math: adding 1 to an int* 2586 // is not the same as adding 1 to a uintptr_t. 2587 QualType Val1Ty = E->getVal1()->getType(); 2588 llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1()); 2589 CharUnits PointeeIncAmt = 2590 getContext().getTypeSizeInChars(MemTy->getPointeeType()); 2591 Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt)); 2592 Val1 = CreateMemTemp(Val1Ty, ".atomictmp"); 2593 EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty)); 2594 } else if (E->getOp() != AtomicExpr::Load) { 2595 Val1 = EmitValToTemp(*this, E->getVal1()); 2596 } 2597 2598 if (E->getOp() != AtomicExpr::Store && !Dest) 2599 Dest = CreateMemTemp(E->getType(), ".atomicdst"); 2600 2601 if (UseLibcall) { 2602 // FIXME: Finalize what the libcalls are actually supposed to look like. 2603 // See also http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary . 
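    // [Editor's note] Until the libcall ABI is settled, oversized or
    // under-aligned atomics are simply rejected here; the '#if 0' block
    // below sketches the intended __atomic_*_generic lowering.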
2604 return EmitUnsupportedRValue(E, "atomic library call"); 2605 } 2606#if 0 2607 if (UseLibcall) { 2608 const char* LibCallName; 2609 switch (E->getOp()) { 2610 case AtomicExpr::CmpXchgWeak: 2611 LibCallName = "__atomic_compare_exchange_generic"; break; 2612 case AtomicExpr::CmpXchgStrong: 2613 LibCallName = "__atomic_compare_exchange_generic"; break; 2614 case AtomicExpr::Add: LibCallName = "__atomic_fetch_add_generic"; break; 2615 case AtomicExpr::Sub: LibCallName = "__atomic_fetch_sub_generic"; break; 2616 case AtomicExpr::And: LibCallName = "__atomic_fetch_and_generic"; break; 2617 case AtomicExpr::Or: LibCallName = "__atomic_fetch_or_generic"; break; 2618 case AtomicExpr::Xor: LibCallName = "__atomic_fetch_xor_generic"; break; 2619 case AtomicExpr::Xchg: LibCallName = "__atomic_exchange_generic"; break; 2620 case AtomicExpr::Store: LibCallName = "__atomic_store_generic"; break; 2621 case AtomicExpr::Load: LibCallName = "__atomic_load_generic"; break; 2622 } 2623 llvm::SmallVector<QualType, 4> Params; 2624 CallArgList Args; 2625 QualType RetTy = getContext().VoidTy; 2626 if (E->getOp() != AtomicExpr::Store && !E->isCmpXChg()) 2627 Args.add(RValue::get(EmitCastToVoidPtr(Dest)), 2628 getContext().VoidPtrTy); 2629 Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), 2630 getContext().VoidPtrTy); 2631 if (E->getOp() != AtomicExpr::Load) 2632 Args.add(RValue::get(EmitCastToVoidPtr(Val1)), 2633 getContext().VoidPtrTy); 2634 if (E->isCmpXChg()) { 2635 Args.add(RValue::get(EmitCastToVoidPtr(Val2)), 2636 getContext().VoidPtrTy); 2637 RetTy = getContext().IntTy; 2638 } 2639 Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)), 2640 getContext().getSizeType()); 2641 const CGFunctionInfo &FuncInfo = 2642 CGM.getTypes().getFunctionInfo(RetTy, Args, FunctionType::ExtInfo()); 2643 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo, false); 2644 llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName); 2645 RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args); 2646 if (E->isCmpXChg()) 2647 return Res; 2648 if (E->getOp() == AtomicExpr::Store) 2649 return RValue::get(0); 2650 return ConvertTempToRValue(*this, E->getType(), Dest); 2651 } 2652#endif 2653 llvm::Type *IPtrTy = 2654 llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo(); 2655 llvm::Value *OrigDest = Dest; 2656 Ptr = Builder.CreateBitCast(Ptr, IPtrTy); 2657 if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy); 2658 if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy); 2659 if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy); 2660 2661 if (isa<llvm::ConstantInt>(Order)) { 2662 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); 2663 switch (ord) { 2664 case 0: // memory_order_relaxed 2665 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2666 llvm::Monotonic); 2667 break; 2668 case 1: // memory_order_consume 2669 case 2: // memory_order_acquire 2670 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2671 llvm::Acquire); 2672 break; 2673 case 3: // memory_order_release 2674 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2675 llvm::Release); 2676 break; 2677 case 4: // memory_order_acq_rel 2678 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2679 llvm::AcquireRelease); 2680 break; 2681 case 5: // memory_order_seq_cst 2682 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2683 llvm::SequentiallyConsistent); 2684 break; 2685 default: // invalid order 2686 // We should not ever get here normally, but it's hard to 2687 // 
enforce that in general. 2688 break; 2689 } 2690 if (E->getOp() == AtomicExpr::Store) 2691 return RValue::get(0); 2692 return ConvertTempToRValue(*this, E->getType(), OrigDest); 2693 } 2694 2695 // Long case, when Order isn't obviously constant. 2696 2697 // Create all the relevant BB's 2698 llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0, 2699 *AcqRelBB = 0, *SeqCstBB = 0; 2700 MonotonicBB = createBasicBlock("monotonic", CurFn); 2701 if (E->getOp() != AtomicExpr::Store) 2702 AcquireBB = createBasicBlock("acquire", CurFn); 2703 if (E->getOp() != AtomicExpr::Load) 2704 ReleaseBB = createBasicBlock("release", CurFn); 2705 if (E->getOp() != AtomicExpr::Load && E->getOp() != AtomicExpr::Store) 2706 AcqRelBB = createBasicBlock("acqrel", CurFn); 2707 SeqCstBB = createBasicBlock("seqcst", CurFn); 2708 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn); 2709 2710 // Create the switch for the split 2711 // MonotonicBB is arbitrarily chosen as the default case; in practice, this 2712 // doesn't matter unless someone is crazy enough to use something that 2713 // doesn't fold to a constant for the ordering. 2714 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false); 2715 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB); 2716 2717 // Emit all the different atomics 2718 Builder.SetInsertPoint(MonotonicBB); 2719 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2720 llvm::Monotonic); 2721 Builder.CreateBr(ContBB); 2722 if (E->getOp() != AtomicExpr::Store) { 2723 Builder.SetInsertPoint(AcquireBB); 2724 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2725 llvm::Acquire); 2726 Builder.CreateBr(ContBB); 2727 SI->addCase(Builder.getInt32(1), AcquireBB); 2728 SI->addCase(Builder.getInt32(2), AcquireBB); 2729 } 2730 if (E->getOp() != AtomicExpr::Load) { 2731 Builder.SetInsertPoint(ReleaseBB); 2732 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2733 llvm::Release); 2734 Builder.CreateBr(ContBB); 2735 SI->addCase(Builder.getInt32(3), ReleaseBB); 2736 } 2737 if (E->getOp() != AtomicExpr::Load && E->getOp() != AtomicExpr::Store) { 2738 Builder.SetInsertPoint(AcqRelBB); 2739 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2740 llvm::AcquireRelease); 2741 Builder.CreateBr(ContBB); 2742 SI->addCase(Builder.getInt32(4), AcqRelBB); 2743 } 2744 Builder.SetInsertPoint(SeqCstBB); 2745 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2746 llvm::SequentiallyConsistent); 2747 Builder.CreateBr(ContBB); 2748 SI->addCase(Builder.getInt32(5), SeqCstBB); 2749 2750 // Cleanup and return 2751 Builder.SetInsertPoint(ContBB); 2752 if (E->getOp() == AtomicExpr::Store) 2753 return RValue::get(0); 2754 return ConvertTempToRValue(*this, E->getType(), OrigDest); 2755} 2756 2757void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, unsigned AccuracyN, 2758 unsigned AccuracyD) { 2759 assert(Val->getType()->isFPOrFPVectorTy()); 2760 if (!AccuracyN || !isa<llvm::Instruction>(Val)) 2761 return; 2762 2763 llvm::Value *Vals[2]; 2764 Vals[0] = llvm::ConstantInt::get(Int32Ty, AccuracyN); 2765 Vals[1] = llvm::ConstantInt::get(Int32Ty, AccuracyD); 2766 llvm::MDNode *Node = llvm::MDNode::get(getLLVMContext(), Vals); 2767 2768 cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpaccuracy, 2769 Node); 2770} 2771
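// [Editor's illustration] SetFPAccuracy(V, 2, 1) attaches an 'fpaccuracy'
// metadata node holding the two i32 constants {2, 1} to V's defining
// instruction, presumably encoding the acceptable error as the rational
// N/D; this lets the frontend permit relaxed precision for individual
// floating-point operations.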