CGExpr.cpp revision cdc2e82c317163693c99a8cbef76685e86420395
//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/ConvertUTF.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/DataLayout.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/MDBuilder.h"
using namespace clang;
using namespace CodeGen;

//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
  unsigned addressSpace =
    cast<llvm::PointerType>(value->getType())->getAddressSpace();

  llvm::PointerType *destType = Int8PtrTy;
  if (addressSpace)
    destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);

  if (value->getType() == destType) return value;
  return Builder.CreateBitCast(value, destType);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
                                                    const Twine &Name) {
  if (!Builder.isNamePreserving())
    return new llvm::AllocaInst(Ty, 0, "", AllocaInsertPt);
  return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
}

void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var,
                                     llvm::Value *Init) {
  llvm::StoreInst *Store = new llvm::StoreInst(Init, Var);
  llvm::BasicBlock *Block = AllocaInsertPt->getParent();
  Block->getInstList().insertAfter(&*AllocaInsertPt, Store);
}

llvm::AllocaInst *CodeGenFunction::CreateIRTemp(QualType Ty,
                                                const Twine &Name) {
  llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
  // FIXME: Should we prefer the preferred type alignment here?
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  Alloc->setAlignment(Align.getQuantity());
  return Alloc;
}

llvm::AllocaInst *CodeGenFunction::CreateMemTemp(QualType Ty,
                                                 const Twine &Name) {
  llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
  // FIXME: Should we prefer the preferred type alignment here?
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  Alloc->setAlignment(Align.getQuantity());
  return Alloc;
}

/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
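/// For example, `if (p)` on a pointer reduces to an `icmp ne` of `p` against
/// null, and a `_Complex` value converts to true when either its real or
/// imaginary component is nonzero.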
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
    llvm::Value *MemPtr = EmitScalarExpr(E);
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
  }

  QualType BoolTy = getContext().BoolTy;
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),BoolTy);
}

/// EmitIgnoredExpr - Emit code to compute the specified expression,
/// ignoring the result.
void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
  if (E->isRValue())
    return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);

  // Just emit it as an l-value and drop the result.
  EmitLValue(E);
}

/// EmitAnyExpr - Emit code to compute the specified expression which
/// can have any type.  The result is returned as an RValue struct.
/// If this is an aggregate expression, AggSlot indicates where the
/// result should be returned.
RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
                                    AggValueSlot aggSlot,
                                    bool ignoreResult) {
  if (!hasAggregateLLVMType(E->getType()))
    return RValue::get(EmitScalarExpr(E, ignoreResult));
  else if (E->getType()->isAnyComplexType())
    return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));

  if (!ignoreResult && aggSlot.isIgnored())
    aggSlot = CreateAggTemp(E->getType(), "agg-temp");
  EmitAggExpr(E, aggSlot);
  return aggSlot.asRValue();
}

/// EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
  AggValueSlot AggSlot = AggValueSlot::ignored();

  if (hasAggregateLLVMType(E->getType()) &&
      !E->getType()->isAnyComplexType())
    AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
  return EmitAnyExpr(E, AggSlot);
}

/// EmitAnyExprToMem - Evaluate an expression into a given memory
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
                                       llvm::Value *Location,
                                       Qualifiers Quals,
                                       bool IsInit) {
  // FIXME: This function should take an LValue as an argument.
  if (E->getType()->isAnyComplexType()) {
    EmitComplexExprIntoAddr(E, Location, Quals.hasVolatile());
  } else if (hasAggregateLLVMType(E->getType())) {
    CharUnits Alignment = getContext().getTypeAlignInChars(E->getType());
    EmitAggExpr(E, AggValueSlot::forAddr(Location, Alignment, Quals,
                                         AggValueSlot::IsDestructed_t(IsInit),
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsAliased_t(!IsInit)));
  } else {
    RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
    LValue LV = MakeAddrLValue(Location, E->getType());
    EmitStoreThroughLValue(RV, LV);
  }
}

static llvm::Value *
CreateReferenceTemporary(CodeGenFunction &CGF, QualType Type,
                         const NamedDecl *InitializedDecl) {
  if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
    if (VD->hasGlobalStorage()) {
      SmallString<256> Name;
      llvm::raw_svector_ostream Out(Name);
      CGF.CGM.getCXXABI().getMangleContext().mangleReferenceTemporary(VD, Out);
      Out.flush();

      llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type);

      // Create the reference temporary.
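      // A reference with static storage duration, e.g. `const int &r = 42;`
      // at namespace scope, needs its temporary materialized as an
      // internal-linkage global rather than a function-local alloca.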
      llvm::GlobalValue *RefTemp =
        new llvm::GlobalVariable(CGF.CGM.getModule(),
                                 RefTempTy, /*isConstant=*/false,
                                 llvm::GlobalValue::InternalLinkage,
                                 llvm::Constant::getNullValue(RefTempTy),
                                 Name.str());
      return RefTemp;
    }
  }

  return CGF.CreateMemTemp(Type, "ref.tmp");
}

static llvm::Value *
EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
                            llvm::Value *&ReferenceTemporary,
                            const CXXDestructorDecl *&ReferenceTemporaryDtor,
                            QualType &ObjCARCReferenceLifetimeType,
                            const NamedDecl *InitializedDecl) {
  const MaterializeTemporaryExpr *M = NULL;
  E = E->findMaterializedTemporary(M);
  // Objective-C++ ARC:
  //   If we are binding a reference to a temporary that has ownership, we
  //   need to perform retain/release operations on the temporary.
  if (M && CGF.getLangOpts().ObjCAutoRefCount &&
      M->getType()->isObjCLifetimeType() &&
      (M->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
       M->getType().getObjCLifetime() == Qualifiers::OCL_Weak ||
       M->getType().getObjCLifetime() == Qualifiers::OCL_Autoreleasing))
    ObjCARCReferenceLifetimeType = M->getType();

  if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(E)) {
    CGF.enterFullExpression(EWC);
    CodeGenFunction::RunCleanupsScope Scope(CGF);

    return EmitExprForReferenceBinding(CGF, EWC->getSubExpr(),
                                       ReferenceTemporary,
                                       ReferenceTemporaryDtor,
                                       ObjCARCReferenceLifetimeType,
                                       InitializedDecl);
  }

  RValue RV;
  if (E->isGLValue()) {
    // Emit the expression as an lvalue.
    LValue LV = CGF.EmitLValue(E);

    if (LV.isSimple())
      return LV.getAddress();

    // We have to load the lvalue.
    RV = CGF.EmitLoadOfLValue(LV);
  } else {
    if (!ObjCARCReferenceLifetimeType.isNull()) {
      ReferenceTemporary = CreateReferenceTemporary(CGF,
                                                  ObjCARCReferenceLifetimeType,
                                                    InitializedDecl);


      LValue RefTempDst = CGF.MakeAddrLValue(ReferenceTemporary,
                                             ObjCARCReferenceLifetimeType);

      CGF.EmitScalarInit(E, dyn_cast_or_null<ValueDecl>(InitializedDecl),
                         RefTempDst, false);

      bool ExtendsLifeOfTemporary = false;
      if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
        if (Var->extendsLifetimeOfTemporary())
          ExtendsLifeOfTemporary = true;
      } else if (InitializedDecl && isa<FieldDecl>(InitializedDecl)) {
        ExtendsLifeOfTemporary = true;
      }

      if (!ExtendsLifeOfTemporary) {
        // Since the lifetime of this temporary isn't going to be extended,
        // we need to clean it up ourselves at the end of the full expression.
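        // (e.g. a retained temporary bound to a reference parameter at a call
        // site: no declaration extends its lifetime, so it is released here
        // when the full-expression ends.)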
        switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
        case Qualifiers::OCL_None:
        case Qualifiers::OCL_ExplicitNone:
        case Qualifiers::OCL_Autoreleasing:
          break;

        case Qualifiers::OCL_Strong: {
          assert(!ObjCARCReferenceLifetimeType->isArrayType());
          CleanupKind cleanupKind = CGF.getARCCleanupKind();
          CGF.pushDestroy(cleanupKind,
                          ReferenceTemporary,
                          ObjCARCReferenceLifetimeType,
                          CodeGenFunction::destroyARCStrongImprecise,
                          cleanupKind & EHCleanup);
          break;
        }

        case Qualifiers::OCL_Weak:
          assert(!ObjCARCReferenceLifetimeType->isArrayType());
          CGF.pushDestroy(NormalAndEHCleanup,
                          ReferenceTemporary,
                          ObjCARCReferenceLifetimeType,
                          CodeGenFunction::destroyARCWeak,
                          /*useEHCleanupForArray*/ true);
          break;
        }

        ObjCARCReferenceLifetimeType = QualType();
      }

      return ReferenceTemporary;
    }

    SmallVector<SubobjectAdjustment, 2> Adjustments;
    E = E->skipRValueSubobjectAdjustments(Adjustments);
    if (const OpaqueValueExpr *opaque = dyn_cast<OpaqueValueExpr>(E))
      if (opaque->getType()->isRecordType())
        return CGF.EmitOpaqueValueLValue(opaque).getAddress();

    // Create a reference temporary if necessary.
    AggValueSlot AggSlot = AggValueSlot::ignored();
    if (CGF.hasAggregateLLVMType(E->getType()) &&
        !E->getType()->isAnyComplexType()) {
      ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
                                                    InitializedDecl);
      CharUnits Alignment = CGF.getContext().getTypeAlignInChars(E->getType());
      AggValueSlot::IsDestructed_t isDestructed
        = AggValueSlot::IsDestructed_t(InitializedDecl != 0);
      AggSlot = AggValueSlot::forAddr(ReferenceTemporary, Alignment,
                                      Qualifiers(), isDestructed,
                                      AggValueSlot::DoesNotNeedGCBarriers,
                                      AggValueSlot::IsNotAliased);
    }

    if (InitializedDecl) {
      // Get the destructor for the reference temporary.
      if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
        CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
        if (!ClassDecl->hasTrivialDestructor())
          ReferenceTemporaryDtor = ClassDecl->getDestructor();
      }
    }

    RV = CGF.EmitAnyExpr(E, AggSlot);

    // Check if we need to perform derived-to-base casts and/or field accesses,
    // to get from the temporary object we created (and, potentially, for which
    // we extended the lifetime) to the subobject we're binding the reference
    // to.
    if (!Adjustments.empty()) {
      llvm::Value *Object = RV.getAggregateAddr();
      for (unsigned I = Adjustments.size(); I != 0; --I) {
        SubobjectAdjustment &Adjustment = Adjustments[I-1];
        switch (Adjustment.Kind) {
        case SubobjectAdjustment::DerivedToBaseAdjustment:
          Object =
              CGF.GetAddressOfBaseClass(Object,
                                        Adjustment.DerivedToBase.DerivedClass,
                              Adjustment.DerivedToBase.BasePath->path_begin(),
                              Adjustment.DerivedToBase.BasePath->path_end(),
                                        /*NullCheckValue=*/false);
          break;

        case SubobjectAdjustment::FieldAdjustment: {
          LValue LV = CGF.MakeAddrLValue(Object, E->getType());
          LV = CGF.EmitLValueForField(LV, Adjustment.Field);
          if (LV.isSimple()) {
            Object = LV.getAddress();
            break;
          }

          // For non-simple lvalues, we actually have to create a copy of
          // the object we're binding to.
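          // (e.g. binding a `const int &` to a bit-field member of the
          // temporary: a bit-field has no addressable storage of its own,
          // so its value is copied into a fresh temporary below.)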
          QualType T = Adjustment.Field->getType().getNonReferenceType()
                                                  .getUnqualifiedType();
          Object = CreateReferenceTemporary(CGF, T, InitializedDecl);
          LValue TempLV = CGF.MakeAddrLValue(Object,
                                             Adjustment.Field->getType());
          CGF.EmitStoreThroughLValue(CGF.EmitLoadOfLValue(LV), TempLV);
          break;
        }

        case SubobjectAdjustment::MemberPointerAdjustment: {
          llvm::Value *Ptr = CGF.EmitScalarExpr(Adjustment.Ptr.RHS);
          Object = CGF.CGM.getCXXABI().EmitMemberDataPointerAddress(
                        CGF, Object, Ptr, Adjustment.Ptr.MPT);
          break;
        }
        }
      }

      return Object;
    }
  }

  if (RV.isAggregate())
    return RV.getAggregateAddr();

  // Create a temporary variable that we can bind the reference to.
  ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
                                                InitializedDecl);


  unsigned Alignment =
    CGF.getContext().getTypeAlignInChars(E->getType()).getQuantity();
  if (RV.isScalar())
    CGF.EmitStoreOfScalar(RV.getScalarVal(), ReferenceTemporary,
                          /*Volatile=*/false, Alignment, E->getType());
  else
    CGF.StoreComplexToAddr(RV.getComplexVal(), ReferenceTemporary,
                           /*Volatile=*/false);
  return ReferenceTemporary;
}

RValue
CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
                                            const NamedDecl *InitializedDecl) {
  llvm::Value *ReferenceTemporary = 0;
  const CXXDestructorDecl *ReferenceTemporaryDtor = 0;
  QualType ObjCARCReferenceLifetimeType;
  llvm::Value *Value = EmitExprForReferenceBinding(*this, E, ReferenceTemporary,
                                                   ReferenceTemporaryDtor,
                                                   ObjCARCReferenceLifetimeType,
                                                   InitializedDecl);
  if (SanitizePerformTypeCheck && !E->getType()->isFunctionType()) {
    // C++11 [dcl.ref]p5 (as amended by core issue 453):
    //   If a glvalue to which a reference is directly bound designates neither
    //   an existing object or function of an appropriate type nor a region of
    //   storage of suitable size and alignment to contain an object of the
    //   reference's type, the behavior is undefined.
    QualType Ty = E->getType();
    EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);
  }
  if (!ReferenceTemporaryDtor && ObjCARCReferenceLifetimeType.isNull())
    return RValue::get(Value);

  // Make sure to call the destructor for the reference temporary.
  const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl);
  if (VD && VD->hasGlobalStorage()) {
    if (ReferenceTemporaryDtor) {
      llvm::Constant *DtorFn =
        CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
      CGM.getCXXABI().registerGlobalDtor(*this, DtorFn,
                                      cast<llvm::Constant>(ReferenceTemporary));
    } else {
      assert(!ObjCARCReferenceLifetimeType.isNull());
      // Note: We intentionally do not register a global "destructor" to
      // release the object.
    }

    return RValue::get(Value);
  }

  if (ReferenceTemporaryDtor)
    PushDestructorCleanup(ReferenceTemporaryDtor, ReferenceTemporary);
  else {
    switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
      llvm_unreachable(
                      "Not a reference temporary that needs to be deallocated");
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      // Nothing to do.
      break;

    case Qualifiers::OCL_Strong: {
      bool precise = VD && VD->hasAttr<ObjCPreciseLifetimeAttr>();
      CleanupKind cleanupKind = getARCCleanupKind();
      pushDestroy(cleanupKind, ReferenceTemporary, ObjCARCReferenceLifetimeType,
                  precise ?
                      destroyARCStrongPrecise : destroyARCStrongImprecise,
                  cleanupKind & EHCleanup);
      break;
    }

    case Qualifiers::OCL_Weak: {
      // __weak objects always get EH cleanups; otherwise, exceptions
      // could cause really nasty crashes instead of mere leaks.
      pushDestroy(NormalAndEHCleanup, ReferenceTemporary,
                  ObjCARCReferenceLifetimeType, destroyARCWeak, true);
      break;
    }
    }
  }

  return RValue::get(Value);
}


/// getAccessedFieldNo - Given an encoded value and a result number, return the
/// input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
      ->getZExtValue();
}

/// Emit the hash_16_bytes function from include/llvm/ADT/Hashing.h.
static llvm::Value *emitHash16Bytes(CGBuilderTy &Builder, llvm::Value *Low,
                                    llvm::Value *High) {
  llvm::Value *KMul = Builder.getInt64(0x9ddfea08eb382d69ULL);
  llvm::Value *K47 = Builder.getInt64(47);
  llvm::Value *A0 = Builder.CreateMul(Builder.CreateXor(Low, High), KMul);
  llvm::Value *A1 = Builder.CreateXor(Builder.CreateLShr(A0, K47), A0);
  llvm::Value *B0 = Builder.CreateMul(Builder.CreateXor(High, A1), KMul);
  llvm::Value *B1 = Builder.CreateXor(Builder.CreateLShr(B0, K47), B0);
  return Builder.CreateMul(B1, KMul);
}

void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
                                    llvm::Value *Address,
                                    QualType Ty, CharUnits Alignment) {
  if (!SanitizePerformTypeCheck)
    return;

  // Don't check pointers outside the default address space. The null check
  // isn't correct, the object-size check isn't supported by LLVM, and we can't
  // communicate the addresses to the runtime handler for the vptr check.
  if (Address->getType()->getPointerAddressSpace())
    return;

  llvm::Value *Cond = 0;

  if (getLangOpts().SanitizeNull) {
    // The glvalue must not be an empty glvalue.
    Cond = Builder.CreateICmpNE(
        Address, llvm::Constant::getNullValue(Address->getType()));
  }

  if (getLangOpts().SanitizeObjectSize && !Ty->isIncompleteType()) {
    uint64_t Size = getContext().getTypeSizeInChars(Ty).getQuantity();

    // The glvalue must refer to a large enough storage region.
    // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
    //        to check this.
    llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, IntPtrTy);
    llvm::Value *Min = Builder.getFalse();
    llvm::Value *CastAddr = Builder.CreateBitCast(Address, Int8PtrTy);
    llvm::Value *LargeEnough =
        Builder.CreateICmpUGE(Builder.CreateCall2(F, CastAddr, Min),
                              llvm::ConstantInt::get(IntPtrTy, Size));
    Cond = Cond ? Builder.CreateAnd(Cond, LargeEnough) : LargeEnough;
  }

  uint64_t AlignVal = 0;

  if (getLangOpts().SanitizeAlignment) {
    AlignVal = Alignment.getQuantity();
    if (!Ty->isIncompleteType() && !AlignVal)
      AlignVal = getContext().getTypeAlignInChars(Ty).getQuantity();

    // The glvalue must be suitably aligned.
    if (AlignVal) {
      llvm::Value *Align =
          Builder.CreateAnd(Builder.CreatePtrToInt(Address, IntPtrTy),
                            llvm::ConstantInt::get(IntPtrTy, AlignVal - 1));
      llvm::Value *Aligned =
          Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
      Cond = Cond ?
                 Builder.CreateAnd(Cond, Aligned) : Aligned;
    }
  }

  if (Cond) {
    llvm::Constant *StaticData[] = {
      EmitCheckSourceLocation(Loc),
      EmitCheckTypeDescriptor(Ty),
      llvm::ConstantInt::get(SizeTy, AlignVal),
      llvm::ConstantInt::get(Int8Ty, TCK)
    };
    EmitCheck(Cond, "type_mismatch", StaticData, Address, CRK_Recoverable);
  }

  // If possible, check that the vptr indicates that there is a subobject of
  // type Ty at offset zero within this object.
  CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  if (getLangOpts().SanitizeVptr && TCK != TCK_ConstructorCall &&
      RD && RD->hasDefinition() && RD->isDynamicClass()) {
    // Compute a hash of the mangled name of the type.
    //
    // FIXME: This is not guaranteed to be deterministic! Move to a
    //        fingerprinting mechanism once LLVM provides one. For the time
    //        being the implementation happens to be deterministic.
    llvm::SmallString<64> MangledName;
    llvm::raw_svector_ostream Out(MangledName);
    CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
                                                     Out);
    llvm::hash_code TypeHash = hash_value(Out.str());

    // Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
    llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash);
    llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
    llvm::Value *VPtrAddr = Builder.CreateBitCast(Address, VPtrTy);
    llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr);
    llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty);

    llvm::Value *Hash = emitHash16Bytes(Builder, Low, High);
    Hash = Builder.CreateTrunc(Hash, IntPtrTy);

    // Look the hash up in our cache.
    const int CacheSize = 128;
    llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
    llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
                                                   "__ubsan_vptr_type_cache");
    llvm::Value *Slot = Builder.CreateAnd(Hash,
                                          llvm::ConstantInt::get(IntPtrTy,
                                                                 CacheSize-1));
    llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
    llvm::Value *CacheVal =
        Builder.CreateLoad(Builder.CreateInBoundsGEP(Cache, Indices));

    // If the hash isn't in the cache, call a runtime handler to perform the
    // hard work of checking whether the vptr is for an object of the right
    // type. This will either fill in the cache and return, or produce a
    // diagnostic.
    llvm::Constant *StaticData[] = {
      EmitCheckSourceLocation(Loc),
      EmitCheckTypeDescriptor(Ty),
      CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
      llvm::ConstantInt::get(Int8Ty, TCK)
    };
    llvm::Value *DynamicData[] = { Address, Hash };
    EmitCheck(Builder.CreateICmpEQ(CacheVal, Hash),
              "dynamic_type_cache_miss", StaticData, DynamicData,
              CRK_AlwaysRecoverable);
  }
}


CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                         bool isInc, bool isPre) {
  ComplexPairTy InVal = LoadComplexFromAddr(LV.getAddress(),
                                            LV.isVolatileQualified());

  llvm::Value *NextVal;
  if (isa<llvm::IntegerType>(InVal.first->getType())) {
    uint64_t AmountVal = isInc ? 1 : -1;
    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ?
"inc" : "dec"); 608 } else { 609 QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType(); 610 llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1); 611 if (!isInc) 612 FVal.changeSign(); 613 NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal); 614 615 // Add the inc/dec to the real part. 616 NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec"); 617 } 618 619 ComplexPairTy IncVal(NextVal, InVal.second); 620 621 // Store the updated result through the lvalue. 622 StoreComplexToAddr(IncVal, LV.getAddress(), LV.isVolatileQualified()); 623 624 // If this is a postinc, return the value read from memory, otherwise use the 625 // updated value. 626 return isPre ? IncVal : InVal; 627} 628 629 630//===----------------------------------------------------------------------===// 631// LValue Expression Emission 632//===----------------------------------------------------------------------===// 633 634RValue CodeGenFunction::GetUndefRValue(QualType Ty) { 635 if (Ty->isVoidType()) 636 return RValue::get(0); 637 638 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) { 639 llvm::Type *EltTy = ConvertType(CTy->getElementType()); 640 llvm::Value *U = llvm::UndefValue::get(EltTy); 641 return RValue::getComplex(std::make_pair(U, U)); 642 } 643 644 // If this is a use of an undefined aggregate type, the aggregate must have an 645 // identifiable address. Just because the contents of the value are undefined 646 // doesn't mean that the address can't be taken and compared. 647 if (hasAggregateLLVMType(Ty)) { 648 llvm::Value *DestPtr = CreateMemTemp(Ty, "undef.agg.tmp"); 649 return RValue::getAggregate(DestPtr); 650 } 651 652 return RValue::get(llvm::UndefValue::get(ConvertType(Ty))); 653} 654 655RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E, 656 const char *Name) { 657 ErrorUnsupported(E, Name); 658 return GetUndefRValue(E->getType()); 659} 660 661LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E, 662 const char *Name) { 663 ErrorUnsupported(E, Name); 664 llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType())); 665 return MakeAddrLValue(llvm::UndefValue::get(Ty), E->getType()); 666} 667 668LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) { 669 LValue LV = EmitLValue(E); 670 if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) 671 EmitTypeCheck(TCK, E->getExprLoc(), LV.getAddress(), 672 E->getType(), LV.getAlignment()); 673 return LV; 674} 675 676/// EmitLValue - Emit code to compute a designator that specifies the location 677/// of the expression. 678/// 679/// This can return one of two things: a simple address or a bitfield reference. 680/// In either case, the LLVM Value* in the LValue structure is guaranteed to be 681/// an LLVM pointer type. 682/// 683/// If this returns a bitfield reference, nothing about the pointee type of the 684/// LLVM value is known: For example, it may not be a pointer to an integer. 685/// 686/// If this returns a normal address, and if the lvalue's C type is fixed size, 687/// this method guarantees that the returned pointer type will point to an LLVM 688/// type of the same size of the lvalue's type. If the lvalue has a variable 689/// length type, this is not possible. 
690/// 691LValue CodeGenFunction::EmitLValue(const Expr *E) { 692 switch (E->getStmtClass()) { 693 default: return EmitUnsupportedLValue(E, "l-value expression"); 694 695 case Expr::ObjCPropertyRefExprClass: 696 llvm_unreachable("cannot emit a property reference directly"); 697 698 case Expr::ObjCSelectorExprClass: 699 return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E)); 700 case Expr::ObjCIsaExprClass: 701 return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E)); 702 case Expr::BinaryOperatorClass: 703 return EmitBinaryOperatorLValue(cast<BinaryOperator>(E)); 704 case Expr::CompoundAssignOperatorClass: 705 if (!E->getType()->isAnyComplexType()) 706 return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E)); 707 return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E)); 708 case Expr::CallExprClass: 709 case Expr::CXXMemberCallExprClass: 710 case Expr::CXXOperatorCallExprClass: 711 case Expr::UserDefinedLiteralClass: 712 return EmitCallExprLValue(cast<CallExpr>(E)); 713 case Expr::VAArgExprClass: 714 return EmitVAArgExprLValue(cast<VAArgExpr>(E)); 715 case Expr::DeclRefExprClass: 716 return EmitDeclRefLValue(cast<DeclRefExpr>(E)); 717 case Expr::ParenExprClass: 718 return EmitLValue(cast<ParenExpr>(E)->getSubExpr()); 719 case Expr::GenericSelectionExprClass: 720 return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr()); 721 case Expr::PredefinedExprClass: 722 return EmitPredefinedLValue(cast<PredefinedExpr>(E)); 723 case Expr::StringLiteralClass: 724 return EmitStringLiteralLValue(cast<StringLiteral>(E)); 725 case Expr::ObjCEncodeExprClass: 726 return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E)); 727 case Expr::PseudoObjectExprClass: 728 return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E)); 729 case Expr::InitListExprClass: 730 return EmitInitListLValue(cast<InitListExpr>(E)); 731 case Expr::CXXTemporaryObjectExprClass: 732 case Expr::CXXConstructExprClass: 733 return EmitCXXConstructLValue(cast<CXXConstructExpr>(E)); 734 case Expr::CXXBindTemporaryExprClass: 735 return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E)); 736 case Expr::CXXUuidofExprClass: 737 return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E)); 738 case Expr::LambdaExprClass: 739 return EmitLambdaLValue(cast<LambdaExpr>(E)); 740 741 case Expr::ExprWithCleanupsClass: { 742 const ExprWithCleanups *cleanups = cast<ExprWithCleanups>(E); 743 enterFullExpression(cleanups); 744 RunCleanupsScope Scope(*this); 745 return EmitLValue(cleanups->getSubExpr()); 746 } 747 748 case Expr::CXXScalarValueInitExprClass: 749 return EmitNullInitializationLValue(cast<CXXScalarValueInitExpr>(E)); 750 case Expr::CXXDefaultArgExprClass: 751 return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr()); 752 case Expr::CXXTypeidExprClass: 753 return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E)); 754 755 case Expr::ObjCMessageExprClass: 756 return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E)); 757 case Expr::ObjCIvarRefExprClass: 758 return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E)); 759 case Expr::StmtExprClass: 760 return EmitStmtExprLValue(cast<StmtExpr>(E)); 761 case Expr::UnaryOperatorClass: 762 return EmitUnaryOpLValue(cast<UnaryOperator>(E)); 763 case Expr::ArraySubscriptExprClass: 764 return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E)); 765 case Expr::ExtVectorElementExprClass: 766 return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E)); 767 case Expr::MemberExprClass: 768 return EmitMemberExpr(cast<MemberExpr>(E)); 769 case Expr::CompoundLiteralExprClass: 770 return 
        EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
  case Expr::BinaryConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
  case Expr::OpaqueValueExprClass:
    return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
  case Expr::SubstNonTypeTemplateParmExprClass:
    return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
  case Expr::ObjCBridgedCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));

  case Expr::MaterializeTemporaryExprClass:
    return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
  }
}

/// Given an object of the given canonical type, can we safely copy a
/// value out of it based on its initializer?
static bool isConstantEmittableObjectType(QualType type) {
  assert(type.isCanonical());
  assert(!type->isReferenceType());

  // Must be const-qualified but non-volatile.
  Qualifiers qs = type.getLocalQualifiers();
  if (!qs.hasConst() || qs.hasVolatile()) return false;

  // Otherwise, all object types satisfy this except C++ classes with
  // mutable subobjects or non-trivial copy/destroy behavior.
  if (const RecordType *RT = dyn_cast<RecordType>(type))
    if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      if (RD->hasMutableFields() || !RD->isTrivial())
        return false;

  return true;
}

/// Can we constant-emit a load of a reference to a variable of the
/// given type?  This is different from predicates like
/// Decl::isUsableInConstantExpressions because we do want it to apply
/// in situations that don't necessarily satisfy the language's rules
/// for this (e.g. C++'s ODR-use rules).  For example, we want to be able
/// to do this with const float variables even if those variables
/// aren't marked 'constexpr'.
enum ConstantEmissionKind {
  CEK_None,
  CEK_AsReferenceOnly,
  CEK_AsValueOrReference,
  CEK_AsValueOnly
};
static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
  type = type.getCanonicalType();
  if (const ReferenceType *ref = dyn_cast<ReferenceType>(type)) {
    if (isConstantEmittableObjectType(ref->getPointeeType()))
      return CEK_AsValueOrReference;
    return CEK_AsReferenceOnly;
  }
  if (isConstantEmittableObjectType(type))
    return CEK_AsValueOnly;
  return CEK_None;
}

/// Try to emit a reference to the given value without producing it as
/// an l-value.  This is actually more than an optimization: we can't
/// produce an l-value for variables that we never actually captured
/// in a block or lambda, which means const int variables or constexpr
/// literals or similar.
CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
  ValueDecl *value = refExpr->getDecl();

  // The value needs to be an enum constant or a constant variable.
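  // (e.g. a lambda that reads a non-captured `const int N = 4;` from the
  // enclosing scope: there is no l-value to load from, so the constant 4
  // itself must be emitted.)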
  ConstantEmissionKind CEK;
  if (isa<ParmVarDecl>(value)) {
    CEK = CEK_None;
  } else if (VarDecl *var = dyn_cast<VarDecl>(value)) {
    CEK = checkVarTypeForConstantEmission(var->getType());
  } else if (isa<EnumConstantDecl>(value)) {
    CEK = CEK_AsValueOnly;
  } else {
    CEK = CEK_None;
  }
  if (CEK == CEK_None) return ConstantEmission();

  Expr::EvalResult result;
  bool resultIsReference;
  QualType resultType;

  // It's best to evaluate all the way as an r-value if that's permitted.
  if (CEK != CEK_AsReferenceOnly &&
      refExpr->EvaluateAsRValue(result, getContext())) {
    resultIsReference = false;
    resultType = refExpr->getType();

  // Otherwise, try to evaluate as an l-value.
  } else if (CEK != CEK_AsValueOnly &&
             refExpr->EvaluateAsLValue(result, getContext())) {
    resultIsReference = true;
    resultType = value->getType();

  // Failure.
  } else {
    return ConstantEmission();
  }

  // In any case, if the initializer has side-effects, abandon ship.
  if (result.HasSideEffects)
    return ConstantEmission();

  // Emit as a constant.
  llvm::Constant *C = CGM.EmitConstantValue(result.Val, resultType, this);

  // Make sure we emit a debug reference to the global variable.
  // This should probably fire even for
  if (isa<VarDecl>(value)) {
    if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
      EmitDeclRefExprDbgValue(refExpr, C);
  } else {
    assert(isa<EnumConstantDecl>(value));
    EmitDeclRefExprDbgValue(refExpr, C);
  }

  // If we emitted a reference constant, we need to dereference that.
  if (resultIsReference)
    return ConstantEmission::forReference(C);

  return ConstantEmission::forValue(C);
}

llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue) {
  return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
                          lvalue.getAlignment().getQuantity(),
                          lvalue.getType(), lvalue.getTBAAInfo());
}

static bool hasBooleanRepresentation(QualType Ty) {
  if (Ty->isBooleanType())
    return true;

  if (const EnumType *ET = Ty->getAs<EnumType>())
    return ET->getDecl()->getIntegerType()->isBooleanType();

  if (const AtomicType *AT = Ty->getAs<AtomicType>())
    return hasBooleanRepresentation(AT->getValueType());

  return false;
}

llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
  const EnumType *ET = Ty->getAs<EnumType>();
  bool IsRegularCPlusPlusEnum = (getLangOpts().CPlusPlus && ET &&
                                 CGM.getCodeGenOpts().StrictEnums &&
                                 !ET->getDecl()->isFixed());
  bool IsBool = hasBooleanRepresentation(Ty);
  if (!IsBool && !IsRegularCPlusPlusEnum)
    return NULL;

  llvm::APInt Min;
  llvm::APInt End;
  if (IsBool) {
    Min = llvm::APInt(getContext().getTypeSize(Ty), 0);
    End = llvm::APInt(getContext().getTypeSize(Ty), 2);
  } else {
    const EnumDecl *ED = ET->getDecl();
    llvm::Type *LTy = ConvertTypeForMem(ED->getIntegerType());
    unsigned Bitwidth = LTy->getScalarSizeInBits();
    unsigned NumNegativeBits = ED->getNumNegativeBits();
    unsigned NumPositiveBits = ED->getNumPositiveBits();

    if (NumNegativeBits) {
      unsigned NumBits = std::max(NumNegativeBits, NumPositiveBits + 1);
      assert(NumBits <= Bitwidth);
      End = llvm::APInt(Bitwidth, 1) << (NumBits - 1);
      Min = -End;
    } else {
      assert(NumPositiveBits <= Bitwidth);
      End = llvm::APInt(Bitwidth, 1) << NumPositiveBits;
      Min = llvm::APInt(Bitwidth, 0);
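      // e.g. an enumeration whose enumerators are {0, 1, 2, 3} has
      // NumPositiveBits == 2, producing the half-open range [0, 4).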
    }
  }

  llvm::MDBuilder MDHelper(getLLVMContext());
  return MDHelper.createRange(Min, End);
}

llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
                                               unsigned Alignment, QualType Ty,
                                               llvm::MDNode *TBAAInfo) {

  // For better performance, handle vector loads differently.
  if (Ty->isVectorType()) {
    llvm::Value *V;
    const llvm::Type *EltTy =
      cast<llvm::PointerType>(Addr->getType())->getElementType();

    const llvm::VectorType *VTy = cast<llvm::VectorType>(EltTy);

    // Handle vectors of size 3, like size 4 for better performance.
    if (VTy->getNumElements() == 3) {

      // Bitcast to vec4 type.
      llvm::VectorType *vec4Ty = llvm::VectorType::get(VTy->getElementType(),
                                                       4);
      llvm::PointerType *ptVec4Ty =
        llvm::PointerType::get(vec4Ty,
                               (cast<llvm::PointerType>(
                                    Addr->getType()))->getAddressSpace());
      llvm::Value *Cast = Builder.CreateBitCast(Addr, ptVec4Ty,
                                                "castToVec4");
      // Now load value.
      llvm::Value *LoadVal = Builder.CreateLoad(Cast, Volatile, "loadVec4");

      // Shuffle vector to get vec3.
      llvm::Constant *Mask[] = {
        llvm::ConstantInt::get(llvm::Type::getInt32Ty(getLLVMContext()), 0),
        llvm::ConstantInt::get(llvm::Type::getInt32Ty(getLLVMContext()), 1),
        llvm::ConstantInt::get(llvm::Type::getInt32Ty(getLLVMContext()), 2)
      };

      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      V = Builder.CreateShuffleVector(LoadVal,
                                      llvm::UndefValue::get(vec4Ty),
                                      MaskV, "extractVec");
      return EmitFromMemory(V, Ty);
    }
  }

  llvm::LoadInst *Load = Builder.CreateLoad(Addr);
  if (Volatile)
    Load->setVolatile(true);
  if (Alignment)
    Load->setAlignment(Alignment);
  if (TBAAInfo)
    CGM.DecorateInstruction(Load, TBAAInfo);
  // If this is an atomic type, all normal reads must be atomic.
  if (Ty->isAtomicType())
    Load->setAtomic(llvm::SequentiallyConsistent);

  if (CGM.getCodeGenOpts().OptimizationLevel > 0)
    if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
      Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);

  return EmitFromMemory(Load, Ty);
}

llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
  if (hasBooleanRepresentation(Ty)) {
    // This should really always be an i1, but sometimes it's already
    // an i8, and it's awkward to track those cases down.
    if (Value->getType()->isIntegerTy(1))
      return Builder.CreateZExt(Value, ConvertTypeForMem(Ty), "frombool");
    assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
           "wrong value rep of bool");
  }

  return Value;
}

llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
  if (hasBooleanRepresentation(Ty)) {
    assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
           "wrong value rep of bool");
    return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
  }

  return Value;
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
                                        bool Volatile, unsigned Alignment,
                                        QualType Ty,
                                        llvm::MDNode *TBAAInfo,
                                        bool isInit) {

  // Handle vectors differently to get better performance.
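  // Below, a 3-element vector value is widened to 4 elements with a
  // shufflevector before the store, matching the vec4-sized slot that a
  // vec3 occupies in memory.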
  if (Ty->isVectorType()) {
    llvm::Type *SrcTy = Value->getType();
    llvm::VectorType *VecTy = cast<llvm::VectorType>(SrcTy);
    // Handle vec3 special.
    if (VecTy->getNumElements() == 3) {
      llvm::LLVMContext &VMContext = getLLVMContext();

      // Our source is a vec3, do a shuffle vector to make it a vec4.
      llvm::SmallVector<llvm::Constant*, 4> Mask;
      Mask.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                                            0));
      Mask.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                                            1));
      Mask.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                                            2));
      Mask.push_back(llvm::UndefValue::get(llvm::Type::getInt32Ty(VMContext)));

      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      Value = Builder.CreateShuffleVector(Value,
                                          llvm::UndefValue::get(VecTy),
                                          MaskV, "extractVec");
      SrcTy = llvm::VectorType::get(VecTy->getElementType(), 4);
    }
    llvm::PointerType *DstPtr = cast<llvm::PointerType>(Addr->getType());
    if (DstPtr->getElementType() != SrcTy) {
      llvm::Type *MemTy =
        llvm::PointerType::get(SrcTy, DstPtr->getAddressSpace());
      Addr = Builder.CreateBitCast(Addr, MemTy, "storetmp");
    }
  }

  Value = EmitToMemory(Value, Ty);

  llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
  if (Alignment)
    Store->setAlignment(Alignment);
  if (TBAAInfo)
    CGM.DecorateInstruction(Store, TBAAInfo);
  if (!isInit && Ty->isAtomicType())
    Store->setAtomic(llvm::SequentiallyConsistent);
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
                                        bool isInit) {
  EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
                    lvalue.getAlignment().getQuantity(), lvalue.getType(),
                    lvalue.getTBAAInfo(), isInit);
}

/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV) {
  if (LV.isObjCWeak()) {
    // load of a __weak object.
    llvm::Value *AddrWeakObj = LV.getAddress();
    return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                             AddrWeakObj));
  }
  if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
    llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress());
    Object = EmitObjCConsumeObject(LV.getType(), Object);
    return RValue::get(Object);
  }

  if (LV.isSimple()) {
    assert(!LV.getType()->isFunctionType());

    // Everything needs a load.
    return RValue::get(EmitLoadOfScalar(LV));
  }

  if (LV.isVectorElt()) {
    llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddr(),
                                              LV.isVolatileQualified());
    Load->setAlignment(LV.getAlignment().getQuantity());
    return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
                                                    "vecext"));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
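  // (e.g. ext_vector swizzles such as `v.xz`: the loaded vector is narrowed
  // with a shufflevector to just the named elements.)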
  if (LV.isExtVectorElt())
    return EmitLoadOfExtVectorElementLValue(LV);

  assert(LV.isBitField() && "Unknown LValue type!");
  return EmitLoadOfBitfieldLValue(LV);
}

RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
  const CGBitFieldInfo &Info = LV.getBitFieldInfo();

  // Get the output type.
  llvm::Type *ResLTy = ConvertType(LV.getType());

  llvm::Value *Ptr = LV.getBitFieldAddr();
  llvm::Value *Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(),
                                        "bf.load");
  cast<llvm::LoadInst>(Val)->setAlignment(Info.StorageAlignment);

  if (Info.IsSigned) {
    assert((Info.Offset + Info.Size) <= Info.StorageSize);
    unsigned HighBits = Info.StorageSize - Info.Offset - Info.Size;
    if (HighBits)
      Val = Builder.CreateShl(Val, HighBits, "bf.shl");
    if (Info.Offset + HighBits)
      Val = Builder.CreateAShr(Val, Info.Offset + HighBits, "bf.ashr");
  } else {
    if (Info.Offset)
      Val = Builder.CreateLShr(Val, Info.Offset, "bf.lshr");
    if (Info.Offset + Info.Size < Info.StorageSize)
      Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(Info.StorageSize,
                                                              Info.Size),
                              "bf.clear");
  }
  Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");

  return RValue::get(Val);
}

// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
  llvm::LoadInst *Load = Builder.CreateLoad(LV.getExtVectorAddr(),
                                            LV.isVolatileQualified());
  Load->setAlignment(LV.getAlignment().getQuantity());
  llvm::Value *Vec = Load;

  const llvm::Constant *Elts = LV.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be extracting
  // a single element.  Just codegen as an extractelement.
  const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
  if (!ExprVT) {
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
    return RValue::get(Builder.CreateExtractElement(Vec, Elt));
  }

  // Always use shuffle vector to try to retain the original program structure.
  unsigned NumResultElts = ExprVT->getNumElements();

  SmallVector<llvm::Constant*, 4> Mask;
  for (unsigned i = 0; i != NumResultElts; ++i)
    Mask.push_back(Builder.getInt32(getAccessedFieldNo(i, Elts)));

  llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
  Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
                                    MaskV);
  return RValue::get(Vec);
}



/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
                                             bool isInit) {
  if (!Dst.isSimple()) {
    if (Dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element.
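      // (e.g. `v[i] = x;` on a vector lvalue: load the whole vector,
      // insertelement the new scalar, and store the vector back.)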
      llvm::LoadInst *Load = Builder.CreateLoad(Dst.getVectorAddr(),
                                                Dst.isVolatileQualified());
      Load->setAlignment(Dst.getAlignment().getQuantity());
      llvm::Value *Vec = Load;
      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
                                        Dst.getVectorIdx(), "vecins");
      llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getVectorAddr(),
                                                   Dst.isVolatileQualified());
      Store->setAlignment(Dst.getAlignment().getQuantity());
      return;
    }

    // If this is an update of extended vector elements, insert them as
    // appropriate.
    if (Dst.isExtVectorElt())
      return EmitStoreThroughExtVectorComponentLValue(Src, Dst);

    assert(Dst.isBitField() && "Unknown LValue type");
    return EmitStoreThroughBitfieldLValue(Src, Dst);
  }

  // There's special magic for assigning into an ARC-qualified l-value.
  if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
      llvm_unreachable("present but none");

    case Qualifiers::OCL_ExplicitNone:
      // nothing special
      break;

    case Qualifiers::OCL_Strong:
      EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Weak:
      EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Autoreleasing:
      Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
                                                     Src.getScalarVal()));
      // fall into the normal path
      break;
    }
  }

  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // load of a __weak object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
    return;
  }

  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // load of a __strong object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    if (Dst.isObjCIvar()) {
      assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
      llvm::Type *ResultType = ConvertType(getContext().LongTy);
      llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
      llvm::Value *dst = RHS;
      RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
      llvm::Value *LHS =
        Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
      llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
      CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
                                              BytesBetween);
    } else if (Dst.isGlobalObjCRef()) {
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
                                                Dst.isThreadLocalRef());
    }
    else
      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
    return;
  }

  assert(Src.isScalar() && "Can't emit an agg store with this method");
  EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
}

void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                                     llvm::Value **Result) {
  const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
  llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
  llvm::Value *Ptr = Dst.getBitFieldAddr();

  // Get the source value, truncated to the width of the bit-field.
  llvm::Value *SrcVal = Src.getScalarVal();

  // Cast the source to the storage type and shift it into place.
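  // For example, storing to `b` in `struct { unsigned a : 3, b : 5; }` has
  // Offset == 3, Size == 5, StorageSize == 8: the value is masked to 5 bits,
  // shifted left by 3, and merged with the preserved bits of `a` below.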
  SrcVal = Builder.CreateIntCast(SrcVal,
                                 Ptr->getType()->getPointerElementType(),
                                 /*IsSigned=*/false);
  llvm::Value *MaskedVal = SrcVal;

  // See if there are other bits in the bitfield's storage we'll need to load
  // and mask together with source before storing.
  if (Info.StorageSize != Info.Size) {
    assert(Info.StorageSize > Info.Size && "Invalid bitfield size.");
    llvm::Value *Val = Builder.CreateLoad(Ptr, Dst.isVolatileQualified(),
                                          "bf.load");
    cast<llvm::LoadInst>(Val)->setAlignment(Info.StorageAlignment);

    // Mask the source value as needed.
    if (!hasBooleanRepresentation(Dst.getType()))
      SrcVal = Builder.CreateAnd(SrcVal,
                                 llvm::APInt::getLowBitsSet(Info.StorageSize,
                                                            Info.Size),
                                 "bf.value");
    MaskedVal = SrcVal;
    if (Info.Offset)
      SrcVal = Builder.CreateShl(SrcVal, Info.Offset, "bf.shl");

    // Mask out the original value.
    Val = Builder.CreateAnd(Val,
                            ~llvm::APInt::getBitsSet(Info.StorageSize,
                                                     Info.Offset,
                                                     Info.Offset + Info.Size),
                            "bf.clear");

    // Or together the unchanged values and the source value.
    SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
  } else {
    assert(Info.Offset == 0);
  }

  // Write the new value back out.
  llvm::StoreInst *Store = Builder.CreateStore(SrcVal, Ptr,
                                               Dst.isVolatileQualified());
  Store->setAlignment(Info.StorageAlignment);

  // Return the new value of the bit-field, if requested.
  if (Result) {
    llvm::Value *ResultVal = MaskedVal;

    // Sign extend the value if needed.
    if (Info.IsSigned) {
      assert(Info.Size <= Info.StorageSize);
      unsigned HighBits = Info.StorageSize - Info.Size;
      if (HighBits) {
        ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
        ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
      }
    }

    ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
                                      "bf.result.cast");
    *Result = ResultVal;
  }
}

void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
                                                               LValue Dst) {
  // This access turns into a read/modify/write of the vector.  Load the input
  // value now.
  llvm::LoadInst *Load = Builder.CreateLoad(Dst.getExtVectorAddr(),
                                            Dst.isVolatileQualified());
  Load->setAlignment(Dst.getAlignment().getQuantity());
  llvm::Value *Vec = Load;
  const llvm::Constant *Elts = Dst.getExtVectorElts();

  llvm::Value *SrcVal = Src.getScalarVal();

  if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
    unsigned NumSrcElts = VTy->getNumElements();
    unsigned NumDstElts =
      cast<llvm::VectorType>(Vec->getType())->getNumElements();
    if (NumDstElts == NumSrcElts) {
      // Use shuffle vector if the src and destination have the same number of
      // elements, and restore the vector mask since it is on the side it will
      // be stored.
      SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
      for (unsigned i = 0; i != NumSrcElts; ++i)
        Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i);

      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      Vec = Builder.CreateShuffleVector(SrcVal,
                                        llvm::UndefValue::get(Vec->getType()),
                                        MaskV);
    } else if (NumDstElts > NumSrcElts) {
      // Extend the source vector to the same length and then shuffle it
      // into the destination.
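      // (e.g. assigning a 2-element value to `v4.xy`: the source is first
      // lengthened to 4 elements with undef lanes, then shuffled into the
      // destination slots computed below.)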
      // FIXME: since we're shuffling with undef, can we just use the indices
      //        into that?  This could be simpler.
      SmallVector<llvm::Constant*, 4> ExtMask;
      for (unsigned i = 0; i != NumSrcElts; ++i)
        ExtMask.push_back(Builder.getInt32(i));
      ExtMask.resize(NumDstElts, llvm::UndefValue::get(Int32Ty));
      llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask);
      llvm::Value *ExtSrcVal =
        Builder.CreateShuffleVector(SrcVal,
                                    llvm::UndefValue::get(SrcVal->getType()),
                                    ExtMaskV);
      // build identity
      SmallVector<llvm::Constant*, 4> Mask;
      for (unsigned i = 0; i != NumDstElts; ++i)
        Mask.push_back(Builder.getInt32(i));

      // modify when what gets shuffled in
      for (unsigned i = 0; i != NumSrcElts; ++i)
        Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i+NumDstElts);
      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV);
    } else {
      // We should never shorten the vector.
      llvm_unreachable("unexpected shorten vector length");
    }
  } else {
    // If the Src is a scalar (not a vector), it must be updating one element.
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
    Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
  }

  llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getExtVectorAddr(),
                                               Dst.isVolatileQualified());
  Store->setAlignment(Dst.getAlignment().getQuantity());
}

// setObjCGCLValueClass - sets class of the lvalue for the purpose of
// generating the write-barrier API.  It is currently a global, ivar,
// or neither.
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
                                 LValue &LV,
                                 bool IsMemberAccess=false) {
  if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
    return;

  if (isa<ObjCIvarRefExpr>(E)) {
    QualType ExpTy = E->getType();
    if (IsMemberAccess && ExpTy->isPointerType()) {
      // If ivar is a structure pointer, assigning to a field of
      // this struct follows gcc's behavior and makes it a non-ivar
      // write-barrier conservatively.
      ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType()) {
        LV.setObjCIvar(false);
        return;
      }
    }
    LV.setObjCIvar(true);
    ObjCIvarRefExpr *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr*>(E));
    LV.setBaseIvarExp(Exp->getBase());
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
      if (VD->hasGlobalStorage()) {
        LV.setGlobalObjCRef(true);
        LV.setThreadLocalRef(VD->isThreadSpecified());
      }
    }
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    if (LV.isObjCIvar()) {
      // If cast is to a structure pointer, follow gcc's behavior and make it
      // a non-ivar write-barrier.
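      // (e.g. an assignment through `((struct S *)ivar)` is treated as a
      // plain store rather than an ivar assignment for GC write-barrier
      // purposes.)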
1491 QualType ExpTy = E->getType(); 1492 if (ExpTy->isPointerType()) 1493 ExpTy = ExpTy->getAs<PointerType>()->getPointeeType(); 1494 if (ExpTy->isRecordType()) 1495 LV.setObjCIvar(false); 1496 } 1497 return; 1498 } 1499 1500 if (const GenericSelectionExpr *Exp = dyn_cast<GenericSelectionExpr>(E)) { 1501 setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV); 1502 return; 1503 } 1504 1505 if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E)) { 1506 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess); 1507 return; 1508 } 1509 1510 if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E)) { 1511 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess); 1512 return; 1513 } 1514 1515 if (const ObjCBridgedCastExpr *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) { 1516 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess); 1517 return; 1518 } 1519 1520 if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) { 1521 setObjCGCLValueClass(Ctx, Exp->getBase(), LV); 1522 if (LV.isObjCIvar() && !LV.isObjCArray()) 1523 // Using array syntax to assigning to what an ivar points to is not 1524 // same as assigning to the ivar itself. {id *Names;} Names[i] = 0; 1525 LV.setObjCIvar(false); 1526 else if (LV.isGlobalObjCRef() && !LV.isObjCArray()) 1527 // Using array syntax to assigning to what global points to is not 1528 // same as assigning to the global itself. {id *G;} G[i] = 0; 1529 LV.setGlobalObjCRef(false); 1530 return; 1531 } 1532 1533 if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) { 1534 setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true); 1535 // We don't know if member is an 'ivar', but this flag is looked at 1536 // only in the context of LV.isObjCIvar(). 1537 LV.setObjCArray(E->getType()->isArrayType()); 1538 return; 1539 } 1540} 1541 1542static llvm::Value * 1543EmitBitCastOfLValueToProperType(CodeGenFunction &CGF, 1544 llvm::Value *V, llvm::Type *IRType, 1545 StringRef Name = StringRef()) { 1546 unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace(); 1547 return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name); 1548} 1549 1550static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF, 1551 const Expr *E, const VarDecl *VD) { 1552 assert((VD->hasExternalStorage() || VD->isFileVarDecl()) && 1553 "Var decl must have external storage or be a file var decl!"); 1554 1555 llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD); 1556 llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType()); 1557 V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy); 1558 CharUnits Alignment = CGF.getContext().getDeclAlign(VD); 1559 QualType T = E->getType(); 1560 LValue LV; 1561 if (VD->getType()->isReferenceType()) { 1562 llvm::LoadInst *LI = CGF.Builder.CreateLoad(V); 1563 LI->setAlignment(Alignment.getQuantity()); 1564 V = LI; 1565 LV = CGF.MakeNaturalAlignAddrLValue(V, T); 1566 } else { 1567 LV = CGF.MakeAddrLValue(V, E->getType(), Alignment); 1568 } 1569 setObjCGCLValueClass(CGF.getContext(), E, LV); 1570 return LV; 1571} 1572 1573static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, 1574 const Expr *E, const FunctionDecl *FD) { 1575 llvm::Value *V = CGF.CGM.GetAddrOfFunction(FD); 1576 if (!FD->hasPrototype()) { 1577 if (const FunctionProtoType *Proto = 1578 FD->getType()->getAs<FunctionProtoType>()) { 1579 // Ugly case: for a K&R-style definition, the type of the definition 1580 // isn't the same as the type of a use. Correct for this with a 1581 // bitcast. 
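      // Illustrative sketch (hypothetical code, not from this file): given
      //   int f();                            // unprototyped declaration
      //   int f(x) double x; { return 0; }    // K&R-style definition
      // the emitted function has type i32 (double)*, so it is bitcast here
      // to the pointer-to-unprototyped type that a use of 'f' expects.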
1582 QualType NoProtoType = 1583 CGF.getContext().getFunctionNoProtoType(Proto->getResultType()); 1584 NoProtoType = CGF.getContext().getPointerType(NoProtoType); 1585 V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType)); 1586 } 1587 } 1588 CharUnits Alignment = CGF.getContext().getDeclAlign(FD); 1589 return CGF.MakeAddrLValue(V, E->getType(), Alignment); 1590} 1591 1592LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) { 1593 const NamedDecl *ND = E->getDecl(); 1594 CharUnits Alignment = getContext().getDeclAlign(ND); 1595 QualType T = E->getType(); 1596 1597 // A DeclRefExpr for a reference initialized by a constant expression can 1598 // appear without being odr-used. Directly emit the constant initializer. 1599 if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) { 1600 const Expr *Init = VD->getAnyInitializer(VD); 1601 if (Init && !isa<ParmVarDecl>(VD) && VD->getType()->isReferenceType() && 1602 VD->isUsableInConstantExpressions(getContext()) && 1603 VD->checkInitIsICE()) { 1604 llvm::Constant *Val = 1605 CGM.EmitConstantValue(*VD->evaluateValue(), VD->getType(), this); 1606 assert(Val && "failed to emit reference constant expression"); 1607 // FIXME: Eventually we will want to emit vector element references. 1608 return MakeAddrLValue(Val, T, Alignment); 1609 } 1610 } 1611 1612 // FIXME: We should be able to assert this for FunctionDecls as well! 1613 // FIXME: We should be able to assert this for all DeclRefExprs, not just 1614 // those with a valid source location. 1615 assert((ND->isUsed(false) || !isa<VarDecl>(ND) || 1616 !E->getLocation().isValid()) && 1617 "Should not use decl without marking it used!"); 1618 1619 if (ND->hasAttr<WeakRefAttr>()) { 1620 const ValueDecl *VD = cast<ValueDecl>(ND); 1621 llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD); 1622 return MakeAddrLValue(Aliasee, T, Alignment); 1623 } 1624 1625 if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) { 1626 // Check if this is a global variable. 1627 if (VD->hasExternalStorage() || VD->isFileVarDecl()) 1628 return EmitGlobalVarDeclLValue(*this, E, VD); 1629 1630 bool isBlockVariable = VD->hasAttr<BlocksAttr>(); 1631 1632 bool NonGCable = VD->hasLocalStorage() && 1633 !VD->getType()->isReferenceType() && 1634 !isBlockVariable; 1635 1636 llvm::Value *V = LocalDeclMap[VD]; 1637 if (!V && VD->isStaticLocal()) 1638 V = CGM.getStaticLocalDeclAddress(VD); 1639 1640 // Use special handling for lambdas. 
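    // Sketch (hypothetical): for 'int i = 0; auto l = [i] { return i; };'
    // the use of 'i' inside operator() has no LocalDeclMap entry; it
    // resolves through LambdaCaptureFields to a field of the closure type
    // and is emitted as a member access off CXXABIThisValue below.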
1641 if (!V) { 1642 if (FieldDecl *FD = LambdaCaptureFields.lookup(VD)) { 1643 QualType LambdaTagType = getContext().getTagDeclType(FD->getParent()); 1644 LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue, 1645 LambdaTagType); 1646 return EmitLValueForField(LambdaLV, FD); 1647 } 1648 1649 assert(isa<BlockDecl>(CurCodeDecl) && E->refersToEnclosingLocal()); 1650 return MakeAddrLValue(GetAddrOfBlockDecl(VD, isBlockVariable), 1651 T, Alignment); 1652 } 1653 1654 assert(V && "DeclRefExpr not entered in LocalDeclMap?"); 1655 1656 if (isBlockVariable) 1657 V = BuildBlockByrefAddress(V, VD); 1658 1659 LValue LV; 1660 if (VD->getType()->isReferenceType()) { 1661 llvm::LoadInst *LI = Builder.CreateLoad(V); 1662 LI->setAlignment(Alignment.getQuantity()); 1663 V = LI; 1664 LV = MakeNaturalAlignAddrLValue(V, T); 1665 } else { 1666 LV = MakeAddrLValue(V, T, Alignment); 1667 } 1668 1669 if (NonGCable) { 1670 LV.getQuals().removeObjCGCAttr(); 1671 LV.setNonGC(true); 1672 } 1673 setObjCGCLValueClass(getContext(), E, LV); 1674 return LV; 1675 } 1676 1677 if (const FunctionDecl *fn = dyn_cast<FunctionDecl>(ND)) 1678 return EmitFunctionDeclLValue(*this, E, fn); 1679 1680 llvm_unreachable("Unhandled DeclRefExpr"); 1681} 1682 1683LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) { 1684 // __extension__ doesn't affect lvalue-ness. 1685 if (E->getOpcode() == UO_Extension) 1686 return EmitLValue(E->getSubExpr()); 1687 1688 QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType()); 1689 switch (E->getOpcode()) { 1690 default: llvm_unreachable("Unknown unary operator lvalue!"); 1691 case UO_Deref: { 1692 QualType T = E->getSubExpr()->getType()->getPointeeType(); 1693 assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type"); 1694 1695 LValue LV = MakeNaturalAlignAddrLValue(EmitScalarExpr(E->getSubExpr()), T); 1696 LV.getQuals().setAddressSpace(ExprTy.getAddressSpace()); 1697 1698 // We should not generate __weak write barrier on indirect reference 1699 // of a pointer to object; as in void foo (__weak id *param); *param = 0; 1700 // But, we continue to generate __strong write barrier on indirect write 1701 // into a pointer to object. 1702 if (getLangOpts().ObjC1 && 1703 getLangOpts().getGC() != LangOptions::NonGC && 1704 LV.isObjCWeak()) 1705 LV.setNonGC(!E->isOBJCGCCandidate(getContext())); 1706 return LV; 1707 } 1708 case UO_Real: 1709 case UO_Imag: { 1710 LValue LV = EmitLValue(E->getSubExpr()); 1711 assert(LV.isSimple() && "real/imag on non-ordinary l-value"); 1712 llvm::Value *Addr = LV.getAddress(); 1713 1714 // __real is valid on scalars. This is a faster way of testing that. 1715 // __imag can only produce an rvalue on scalars. 
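    // E.g. with 'double d; _Complex double c;': '__real d' is just an
    // lvalue for d itself (the early return below), while '__real c' and
    // '__imag c' address the first/second member of the underlying
    // {double, double} struct via the struct GEP.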
1716 if (E->getOpcode() == UO_Real && 1717 !cast<llvm::PointerType>(Addr->getType()) 1718 ->getElementType()->isStructTy()) { 1719 assert(E->getSubExpr()->getType()->isArithmeticType()); 1720 return LV; 1721 } 1722 1723 assert(E->getSubExpr()->getType()->isAnyComplexType()); 1724 1725 unsigned Idx = E->getOpcode() == UO_Imag; 1726 return MakeAddrLValue(Builder.CreateStructGEP(LV.getAddress(), 1727 Idx, "idx"), 1728 ExprTy); 1729 } 1730 case UO_PreInc: 1731 case UO_PreDec: { 1732 LValue LV = EmitLValue(E->getSubExpr()); 1733 bool isInc = E->getOpcode() == UO_PreInc; 1734 1735 if (E->getType()->isAnyComplexType()) 1736 EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/); 1737 else 1738 EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/); 1739 return LV; 1740 } 1741 } 1742} 1743 1744LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) { 1745 return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E), 1746 E->getType()); 1747} 1748 1749LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) { 1750 return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E), 1751 E->getType()); 1752} 1753 1754static llvm::Constant* 1755GetAddrOfConstantWideString(StringRef Str, 1756 const char *GlobalName, 1757 ASTContext &Context, 1758 QualType Ty, SourceLocation Loc, 1759 CodeGenModule &CGM) { 1760 1761 StringLiteral *SL = StringLiteral::Create(Context, 1762 Str, 1763 StringLiteral::Wide, 1764 /*Pascal = */false, 1765 Ty, Loc); 1766 llvm::Constant *C = CGM.GetConstantArrayFromStringLiteral(SL); 1767 llvm::GlobalVariable *GV = 1768 new llvm::GlobalVariable(CGM.getModule(), C->getType(), 1769 !CGM.getLangOpts().WritableStrings, 1770 llvm::GlobalValue::PrivateLinkage, 1771 C, GlobalName); 1772 const unsigned WideAlignment = 1773 Context.getTypeAlignInChars(Ty).getQuantity(); 1774 GV->setAlignment(WideAlignment); 1775 return GV; 1776} 1777 1778static void ConvertUTF8ToWideString(unsigned CharByteWidth, StringRef Source, 1779 SmallString<32>& Target) { 1780 Target.resize(CharByteWidth * (Source.size() + 1)); 1781 char *ResultPtr = &Target[0]; 1782 const UTF8 *ErrorPtr; 1783 bool success = ConvertUTF8toWide(CharByteWidth, Source, ResultPtr, ErrorPtr); 1784 (void)success; 1785 assert(success); 1786 Target.resize(ResultPtr - &Target[0]); 1787} 1788 1789LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) { 1790 switch (E->getIdentType()) { 1791 default: 1792 return EmitUnsupportedLValue(E, "predefined expression"); 1793 1794 case PredefinedExpr::Func: 1795 case PredefinedExpr::Function: 1796 case PredefinedExpr::LFunction: 1797 case PredefinedExpr::PrettyFunction: { 1798 unsigned IdentType = E->getIdentType(); 1799 std::string GlobalVarName; 1800 1801 switch (IdentType) { 1802 default: llvm_unreachable("Invalid type"); 1803 case PredefinedExpr::Func: 1804 GlobalVarName = "__func__."; 1805 break; 1806 case PredefinedExpr::Function: 1807 GlobalVarName = "__FUNCTION__."; 1808 break; 1809 case PredefinedExpr::LFunction: 1810 GlobalVarName = "L__FUNCTION__."; 1811 break; 1812 case PredefinedExpr::PrettyFunction: 1813 GlobalVarName = "__PRETTY_FUNCTION__."; 1814 break; 1815 } 1816 1817 StringRef FnName = CurFn->getName(); 1818 if (FnName.startswith("\01")) 1819 FnName = FnName.substr(1); 1820 GlobalVarName += FnName; 1821 1822 const Decl *CurDecl = CurCodeDecl; 1823 if (CurDecl == 0) 1824 CurDecl = getContext().getTranslationUnitDecl(); 1825 1826 std::string FunctionName = 1827 (isa<BlockDecl>(CurDecl) 1828 ? 
FnName.str() 1829 : PredefinedExpr::ComputeName((PredefinedExpr::IdentType)IdentType, 1830 CurDecl)); 1831 1832 const Type* ElemType = E->getType()->getArrayElementTypeNoTypeQual(); 1833 llvm::Constant *C; 1834 if (ElemType->isWideCharType()) { 1835 SmallString<32> RawChars; 1836 ConvertUTF8ToWideString( 1837 getContext().getTypeSizeInChars(ElemType).getQuantity(), 1838 FunctionName, RawChars); 1839 C = GetAddrOfConstantWideString(RawChars, 1840 GlobalVarName.c_str(), 1841 getContext(), 1842 E->getType(), 1843 E->getLocation(), 1844 CGM); 1845 } else { 1846 C = CGM.GetAddrOfConstantCString(FunctionName, 1847 GlobalVarName.c_str(), 1848 1); 1849 } 1850 return MakeAddrLValue(C, E->getType()); 1851 } 1852 } 1853} 1854 1855/// Emit a type description suitable for use by a runtime sanitizer library. The 1856/// format of a type descriptor is 1857/// 1858/// \code 1859/// { i16 TypeKind, i16 TypeInfo } 1860/// \endcode 1861/// 1862/// followed by an array of i8 containing the type name. TypeKind is 0 for an 1863/// integer, 1 for a floating point value, and -1 for anything else. 1864llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) { 1865 // FIXME: Only emit each type's descriptor once. 1866 uint16_t TypeKind = -1; 1867 uint16_t TypeInfo = 0; 1868 1869 if (T->isIntegerType()) { 1870 TypeKind = 0; 1871 TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) | 1872 (T->isSignedIntegerType() ? 1 : 0); 1873 } else if (T->isFloatingType()) { 1874 TypeKind = 1; 1875 TypeInfo = getContext().getTypeSize(T); 1876 } 1877 1878 // Format the type name as if for a diagnostic, including quotes and 1879 // optionally an 'aka'. 1880 llvm::SmallString<32> Buffer; 1881 CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype, 1882 (intptr_t)T.getAsOpaquePtr(), 1883 0, 0, 0, 0, 0, 0, Buffer, 1884 ArrayRef<intptr_t>()); 1885 1886 llvm::Constant *Components[] = { 1887 Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo), 1888 llvm::ConstantDataArray::getString(getLLVMContext(), Buffer) 1889 }; 1890 llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components); 1891 1892 llvm::GlobalVariable *GV = 1893 new llvm::GlobalVariable(CGM.getModule(), Descriptor->getType(), 1894 /*isConstant=*/true, 1895 llvm::GlobalVariable::PrivateLinkage, 1896 Descriptor); 1897 GV->setUnnamedAddr(true); 1898 return GV; 1899} 1900 1901llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) { 1902 llvm::Type *TargetTy = IntPtrTy; 1903 1904 // Integers which fit in intptr_t are zero-extended and passed directly. 1905 if (V->getType()->isIntegerTy() && 1906 V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth()) 1907 return Builder.CreateZExt(V, TargetTy); 1908 1909 // Pointers are passed directly, everything else is passed by address. 1910 if (!V->getType()->isPointerTy()) { 1911 llvm::Value *Ptr = Builder.CreateAlloca(V->getType()); 1912 Builder.CreateStore(V, Ptr); 1913 V = Ptr; 1914 } 1915 return Builder.CreatePtrToInt(V, TargetTy); 1916} 1917 1918/// \brief Emit a representation of a SourceLocation for passing to a handler 1919/// in a sanitizer runtime library. The format for this data is: 1920/// \code 1921/// struct SourceLocation { 1922/// const char *Filename; 1923/// int32_t Line, Column; 1924/// }; 1925/// \endcode 1926/// For an invalid SourceLocation, the Filename pointer is null. 
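/// For example (sketch), a check at line 12, column 7 of "a.c" is encoded
/// as { "a.c", 12, 7 }, matching the fields built from the PresumedLoc
/// below.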
1927llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) { 1928 PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc); 1929 1930 llvm::Constant *Data[] = { 1931 // FIXME: Only emit each file name once. 1932 PLoc.isValid() ? cast<llvm::Constant>( 1933 Builder.CreateGlobalStringPtr(PLoc.getFilename())) 1934 : llvm::Constant::getNullValue(Int8PtrTy), 1935 Builder.getInt32(PLoc.getLine()), 1936 Builder.getInt32(PLoc.getColumn()) 1937 }; 1938 1939 return llvm::ConstantStruct::getAnon(Data); 1940} 1941 1942void CodeGenFunction::EmitCheck(llvm::Value *Checked, StringRef CheckName, 1943 llvm::ArrayRef<llvm::Constant *> StaticArgs, 1944 llvm::ArrayRef<llvm::Value *> DynamicArgs, 1945 CheckRecoverableKind RecoverKind) { 1946 llvm::BasicBlock *Cont = createBasicBlock("cont"); 1947 1948 llvm::BasicBlock *Handler = createBasicBlock("handler." + CheckName); 1949 Builder.CreateCondBr(Checked, Cont, Handler); 1950 EmitBlock(Handler); 1951 1952 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs); 1953 llvm::GlobalValue *InfoPtr = 1954 new llvm::GlobalVariable(CGM.getModule(), Info->getType(), true, 1955 llvm::GlobalVariable::PrivateLinkage, Info); 1956 InfoPtr->setUnnamedAddr(true); 1957 1958 llvm::SmallVector<llvm::Value *, 4> Args; 1959 llvm::SmallVector<llvm::Type *, 4> ArgTypes; 1960 Args.reserve(DynamicArgs.size() + 1); 1961 ArgTypes.reserve(DynamicArgs.size() + 1); 1962 1963 // Handler functions take an i8* pointing to the (handler-specific) static 1964 // information block, followed by a sequence of intptr_t arguments 1965 // representing operand values. 1966 Args.push_back(Builder.CreateBitCast(InfoPtr, Int8PtrTy)); 1967 ArgTypes.push_back(Int8PtrTy); 1968 for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) { 1969 Args.push_back(EmitCheckValue(DynamicArgs[i])); 1970 ArgTypes.push_back(IntPtrTy); 1971 } 1972 1973 bool Recover = (RecoverKind == CRK_AlwaysRecoverable) || 1974 ((RecoverKind == CRK_Recoverable) && 1975 CGM.getCodeGenOpts().SanitizeRecover); 1976 1977 llvm::FunctionType *FnType = 1978 llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false); 1979 llvm::AttrBuilder B; 1980 if (!Recover) { 1981 B.addAttribute(llvm::Attributes::NoReturn) 1982 .addAttribute(llvm::Attributes::NoUnwind); 1983 } 1984 B.addAttribute(llvm::Attributes::UWTable); 1985 1986 // Checks that have two variants use a suffix to differentiate them 1987 bool NeedsAbortSuffix = (RecoverKind != CRK_Unrecoverable) && 1988 !CGM.getCodeGenOpts().SanitizeRecover; 1989 std::string FunctionName = ("__ubsan_handle_" + CheckName + 1990 (NeedsAbortSuffix? "_abort" : "")).str(); 1991 llvm::Value *Fn = 1992 CGM.CreateRuntimeFunction(FnType, FunctionName, 1993 llvm::Attributes::get(getLLVMContext(), B)); 1994 llvm::CallInst *HandlerCall = Builder.CreateCall(Fn, Args); 1995 if (Recover) { 1996 Builder.CreateBr(Cont); 1997 } else { 1998 HandlerCall->setDoesNotReturn(); 1999 HandlerCall->setDoesNotThrow(); 2000 Builder.CreateUnreachable(); 2001 } 2002 2003 EmitBlock(Cont); 2004} 2005 2006void CodeGenFunction::EmitTrapvCheck(llvm::Value *Checked) { 2007 llvm::BasicBlock *Cont = createBasicBlock("cont"); 2008 2009 // If we're optimizing, collapse all calls to trap down to just one per 2010 // function to save on code size. 
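  // That is: at -O0, or for the first check in the function, a fresh "trap"
  // block with its own llvm.trap call is created; later checks in an
  // optimized build branch to the cached TrapBB, so all failures in the
  // function share a single trap.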
2011 if (!CGM.getCodeGenOpts().OptimizationLevel || !TrapBB) { 2012 TrapBB = createBasicBlock("trap"); 2013 Builder.CreateCondBr(Checked, Cont, TrapBB); 2014 EmitBlock(TrapBB); 2015 llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap); 2016 llvm::CallInst *TrapCall = Builder.CreateCall(F); 2017 TrapCall->setDoesNotReturn(); 2018 TrapCall->setDoesNotThrow(); 2019 Builder.CreateUnreachable(); 2020 } else { 2021 Builder.CreateCondBr(Checked, Cont, TrapBB); 2022 } 2023 2024 EmitBlock(Cont); 2025} 2026 2027/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an 2028/// array to pointer, return the array subexpression. 2029static const Expr *isSimpleArrayDecayOperand(const Expr *E) { 2030 // If this isn't just an array->pointer decay, bail out. 2031 const CastExpr *CE = dyn_cast<CastExpr>(E); 2032 if (CE == 0 || CE->getCastKind() != CK_ArrayToPointerDecay) 2033 return 0; 2034 2035 // If this is a decay from variable width array, bail out. 2036 const Expr *SubExpr = CE->getSubExpr(); 2037 if (SubExpr->getType()->isVariableArrayType()) 2038 return 0; 2039 2040 return SubExpr; 2041} 2042 2043LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) { 2044 // The index must always be an integer, which is not an aggregate. Emit it. 2045 llvm::Value *Idx = EmitScalarExpr(E->getIdx()); 2046 QualType IdxTy = E->getIdx()->getType(); 2047 bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType(); 2048 2049 // If the base is a vector type, then we are forming a vector element lvalue 2050 // with this subscript. 2051 if (E->getBase()->getType()->isVectorType()) { 2052 // Emit the vector as an lvalue to get its address. 2053 LValue LHS = EmitLValue(E->getBase()); 2054 assert(LHS.isSimple() && "Can only subscript lvalue vectors here!"); 2055 Idx = Builder.CreateIntCast(Idx, Int32Ty, IdxSigned, "vidx"); 2056 return LValue::MakeVectorElt(LHS.getAddress(), Idx, 2057 E->getBase()->getType(), LHS.getAlignment()); 2058 } 2059 2060 // Extend or truncate the index type to 32 or 64-bits. 2061 if (Idx->getType() != IntPtrTy) 2062 Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom"); 2063 2064 // We know that the pointer points to a type of the correct size, unless the 2065 // size is a VLA or Objective-C interface. 2066 llvm::Value *Address = 0; 2067 CharUnits ArrayAlignment; 2068 if (const VariableArrayType *vla = 2069 getContext().getAsVariableArrayType(E->getType())) { 2070 // The base must be a pointer, which is not an aggregate. Emit 2071 // it. It needs to be emitted first in case it's what captures 2072 // the VLA bounds. 2073 Address = EmitScalarExpr(E->getBase()); 2074 2075 // The element count here is the total number of non-VLA elements. 2076 llvm::Value *numElements = getVLASize(vla).first; 2077 2078 // Effectively, the multiply by the VLA size is part of the GEP. 2079 // GEP indexes are signed, and scaling an index isn't permitted to 2080 // signed-overflow, so we use the same semantics for our explicit 2081 // multiply. We suppress this if overflow is not undefined behavior. 
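    // E.g. (sketch) for 'void f(int n, int (*p)[n], int i) { p[i]; }' the
    // index is scaled by the n recovered from getVLASize(), and a single
    // GEP on the element pointer is emitted; the NSW/inbounds forms are
    // used unless -fwrapv makes signed overflow defined.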
2082 if (getLangOpts().isSignedOverflowDefined()) { 2083 Idx = Builder.CreateMul(Idx, numElements); 2084 Address = Builder.CreateGEP(Address, Idx, "arrayidx"); 2085 } else { 2086 Idx = Builder.CreateNSWMul(Idx, numElements); 2087 Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx"); 2088 } 2089 } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){ 2090 // Indexing over an interface, as in "NSString *P; P[4];" 2091 llvm::Value *InterfaceSize = 2092 llvm::ConstantInt::get(Idx->getType(), 2093 getContext().getTypeSizeInChars(OIT).getQuantity()); 2094 2095 Idx = Builder.CreateMul(Idx, InterfaceSize); 2096 2097 // The base must be a pointer, which is not an aggregate. Emit it. 2098 llvm::Value *Base = EmitScalarExpr(E->getBase()); 2099 Address = EmitCastToVoidPtr(Base); 2100 Address = Builder.CreateGEP(Address, Idx, "arrayidx"); 2101 Address = Builder.CreateBitCast(Address, Base->getType()); 2102 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) { 2103 // If this is A[i] where A is an array, the frontend will have decayed the 2104 // base to be a ArrayToPointerDecay implicit cast. While correct, it is 2105 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a 2106 // "gep x, i" here. Emit one "gep A, 0, i". 2107 assert(Array->getType()->isArrayType() && 2108 "Array to pointer decay must have array source type!"); 2109 LValue ArrayLV = EmitLValue(Array); 2110 llvm::Value *ArrayPtr = ArrayLV.getAddress(); 2111 llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0); 2112 llvm::Value *Args[] = { Zero, Idx }; 2113 2114 // Propagate the alignment from the array itself to the result. 2115 ArrayAlignment = ArrayLV.getAlignment(); 2116 2117 if (getLangOpts().isSignedOverflowDefined()) 2118 Address = Builder.CreateGEP(ArrayPtr, Args, "arrayidx"); 2119 else 2120 Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx"); 2121 } else { 2122 // The base must be a pointer, which is not an aggregate. Emit it. 2123 llvm::Value *Base = EmitScalarExpr(E->getBase()); 2124 if (getLangOpts().isSignedOverflowDefined()) 2125 Address = Builder.CreateGEP(Base, Idx, "arrayidx"); 2126 else 2127 Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx"); 2128 } 2129 2130 QualType T = E->getBase()->getType()->getPointeeType(); 2131 assert(!T.isNull() && 2132 "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type"); 2133 2134 2135 // Limit the alignment to that of the result type. 2136 LValue LV; 2137 if (!ArrayAlignment.isZero()) { 2138 CharUnits Align = getContext().getTypeAlignInChars(T); 2139 ArrayAlignment = std::min(Align, ArrayAlignment); 2140 LV = MakeAddrLValue(Address, T, ArrayAlignment); 2141 } else { 2142 LV = MakeNaturalAlignAddrLValue(Address, T); 2143 } 2144 2145 LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace()); 2146 2147 if (getLangOpts().ObjC1 && 2148 getLangOpts().getGC() != LangOptions::NonGC) { 2149 LV.setNonGC(!E->isOBJCGCCandidate(getContext())); 2150 setObjCGCLValueClass(getContext(), E, LV); 2151 } 2152 return LV; 2153} 2154 2155static 2156llvm::Constant *GenerateConstantVector(CGBuilderTy &Builder, 2157 SmallVector<unsigned, 4> &Elts) { 2158 SmallVector<llvm::Constant*, 4> CElts; 2159 for (unsigned i = 0, e = Elts.size(); i != e; ++i) 2160 CElts.push_back(Builder.getInt32(Elts[i])); 2161 2162 return llvm::ConstantVector::get(CElts); 2163} 2164 2165LValue CodeGenFunction:: 2166EmitExtVectorElementExpr(const ExtVectorElementExpr *E) { 2167 // Emit the base vector as an l-value. 
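  // E.g. (illustrative) both 'v.xy = w' on a vector lvalue and '(v+v).x'
  // on a vector rvalue land here; the rvalue case below first spills the
  // vector to a temporary so there is an address to form the lvalue with.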
2168 LValue Base; 2169 2170 // ExtVectorElementExpr's base can either be a vector or pointer to vector. 2171 if (E->isArrow()) { 2172 // If it is a pointer to a vector, emit the address and form an lvalue with 2173 // it. 2174 llvm::Value *Ptr = EmitScalarExpr(E->getBase()); 2175 const PointerType *PT = E->getBase()->getType()->getAs<PointerType>(); 2176 Base = MakeAddrLValue(Ptr, PT->getPointeeType()); 2177 Base.getQuals().removeObjCGCAttr(); 2178 } else if (E->getBase()->isGLValue()) { 2179 // Otherwise, if the base is an lvalue ( as in the case of foo.x.x), 2180 // emit the base as an lvalue. 2181 assert(E->getBase()->getType()->isVectorType()); 2182 Base = EmitLValue(E->getBase()); 2183 } else { 2184 // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such. 2185 assert(E->getBase()->getType()->isVectorType() && 2186 "Result must be a vector"); 2187 llvm::Value *Vec = EmitScalarExpr(E->getBase()); 2188 2189 // Store the vector to memory (because LValue wants an address). 2190 llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType()); 2191 Builder.CreateStore(Vec, VecMem); 2192 Base = MakeAddrLValue(VecMem, E->getBase()->getType()); 2193 } 2194 2195 QualType type = 2196 E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers()); 2197 2198 // Encode the element access list into a vector of unsigned indices. 2199 SmallVector<unsigned, 4> Indices; 2200 E->getEncodedElementAccess(Indices); 2201 2202 if (Base.isSimple()) { 2203 llvm::Constant *CV = GenerateConstantVector(Builder, Indices); 2204 return LValue::MakeExtVectorElt(Base.getAddress(), CV, type, 2205 Base.getAlignment()); 2206 } 2207 assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!"); 2208 2209 llvm::Constant *BaseElts = Base.getExtVectorElts(); 2210 SmallVector<llvm::Constant *, 4> CElts; 2211 2212 for (unsigned i = 0, e = Indices.size(); i != e; ++i) 2213 CElts.push_back(BaseElts->getAggregateElement(Indices[i])); 2214 llvm::Constant *CV = llvm::ConstantVector::get(CElts); 2215 return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, type, 2216 Base.getAlignment()); 2217} 2218 2219LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) { 2220 Expr *BaseExpr = E->getBase(); 2221 2222 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar. 
2223 LValue BaseLV; 2224 if (E->isArrow()) { 2225 llvm::Value *Ptr = EmitScalarExpr(BaseExpr); 2226 QualType PtrTy = BaseExpr->getType()->getPointeeType(); 2227 EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Ptr, PtrTy); 2228 BaseLV = MakeNaturalAlignAddrLValue(Ptr, PtrTy); 2229 } else 2230 BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess); 2231 2232 NamedDecl *ND = E->getMemberDecl(); 2233 if (FieldDecl *Field = dyn_cast<FieldDecl>(ND)) { 2234 LValue LV = EmitLValueForField(BaseLV, Field); 2235 setObjCGCLValueClass(getContext(), E, LV); 2236 return LV; 2237 } 2238 2239 if (VarDecl *VD = dyn_cast<VarDecl>(ND)) 2240 return EmitGlobalVarDeclLValue(*this, E, VD); 2241 2242 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) 2243 return EmitFunctionDeclLValue(*this, E, FD); 2244 2245 llvm_unreachable("Unhandled member declaration!"); 2246} 2247 2248LValue CodeGenFunction::EmitLValueForField(LValue base, 2249 const FieldDecl *field) { 2250 if (field->isBitField()) { 2251 const CGRecordLayout &RL = 2252 CGM.getTypes().getCGRecordLayout(field->getParent()); 2253 const CGBitFieldInfo &Info = RL.getBitFieldInfo(field); 2254 llvm::Value *Addr = base.getAddress(); 2255 unsigned Idx = RL.getLLVMFieldNo(field); 2256 if (Idx != 0) 2257 // For structs, we GEP to the field that the record layout suggests. 2258 Addr = Builder.CreateStructGEP(Addr, Idx, field->getName()); 2259 // Get the access type. 2260 llvm::Type *PtrTy = llvm::Type::getIntNPtrTy( 2261 getLLVMContext(), Info.StorageSize, 2262 CGM.getContext().getTargetAddressSpace(base.getType())); 2263 if (Addr->getType() != PtrTy) 2264 Addr = Builder.CreateBitCast(Addr, PtrTy); 2265 2266 QualType fieldType = 2267 field->getType().withCVRQualifiers(base.getVRQualifiers()); 2268 return LValue::MakeBitfield(Addr, Info, fieldType, base.getAlignment()); 2269 } 2270 2271 const RecordDecl *rec = field->getParent(); 2272 QualType type = field->getType(); 2273 CharUnits alignment = getContext().getDeclAlign(field); 2274 2275 // FIXME: It should be impossible to have an LValue without alignment for a 2276 // complete type. 2277 if (!base.getAlignment().isZero()) 2278 alignment = std::min(alignment, base.getAlignment()); 2279 2280 bool mayAlias = rec->hasAttr<MayAliasAttr>(); 2281 2282 llvm::Value *addr = base.getAddress(); 2283 unsigned cvr = base.getVRQualifiers(); 2284 if (rec->isUnion()) { 2285 // For unions, there is no pointer adjustment. 2286 assert(!type->isReferenceType() && "union has reference member"); 2287 } else { 2288 // For structs, we GEP to the field that the record layout suggests. 2289 unsigned idx = CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field); 2290 addr = Builder.CreateStructGEP(addr, idx, field->getName()); 2291 2292 // If this is a reference field, load the reference right now. 
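    // E.g. (sketch) for 'struct S { int &r; };', emitting 's.r' loads the
    // stored address here; the result is an lvalue of the referee type
    // 'int', and the CVR qualifiers are dropped since they do not apply
    // through the reference.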
2293 if (const ReferenceType *refType = type->getAs<ReferenceType>()) { 2294 llvm::LoadInst *load = Builder.CreateLoad(addr, "ref"); 2295 if (cvr & Qualifiers::Volatile) load->setVolatile(true); 2296 load->setAlignment(alignment.getQuantity()); 2297 2298 if (CGM.shouldUseTBAA()) { 2299 llvm::MDNode *tbaa; 2300 if (mayAlias) 2301 tbaa = CGM.getTBAAInfo(getContext().CharTy); 2302 else 2303 tbaa = CGM.getTBAAInfo(type); 2304 CGM.DecorateInstruction(load, tbaa); 2305 } 2306 2307 addr = load; 2308 mayAlias = false; 2309 type = refType->getPointeeType(); 2310 if (type->isIncompleteType()) 2311 alignment = CharUnits(); 2312 else 2313 alignment = getContext().getTypeAlignInChars(type); 2314 cvr = 0; // qualifiers don't recursively apply to referencee 2315 } 2316 } 2317 2318 // Make sure that the address is pointing to the right type. This is critical 2319 // for both unions and structs. A union needs a bitcast, a struct element 2320 // will need a bitcast if the LLVM type laid out doesn't match the desired 2321 // type. 2322 addr = EmitBitCastOfLValueToProperType(*this, addr, 2323 CGM.getTypes().ConvertTypeForMem(type), 2324 field->getName()); 2325 2326 if (field->hasAttr<AnnotateAttr>()) 2327 addr = EmitFieldAnnotations(field, addr); 2328 2329 LValue LV = MakeAddrLValue(addr, type, alignment); 2330 LV.getQuals().addCVRQualifiers(cvr); 2331 2332 // __weak attribute on a field is ignored. 2333 if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak) 2334 LV.getQuals().removeObjCGCAttr(); 2335 2336 // Fields of may_alias structs act like 'char' for TBAA purposes. 2337 // FIXME: this should get propagated down through anonymous structs 2338 // and unions. 2339 if (mayAlias && LV.getTBAAInfo()) 2340 LV.setTBAAInfo(CGM.getTBAAInfo(getContext().CharTy)); 2341 2342 return LV; 2343} 2344 2345LValue 2346CodeGenFunction::EmitLValueForFieldInitialization(LValue Base, 2347 const FieldDecl *Field) { 2348 QualType FieldType = Field->getType(); 2349 2350 if (!FieldType->isReferenceType()) 2351 return EmitLValueForField(Base, Field); 2352 2353 const CGRecordLayout &RL = 2354 CGM.getTypes().getCGRecordLayout(Field->getParent()); 2355 unsigned idx = RL.getLLVMFieldNo(Field); 2356 llvm::Value *V = Builder.CreateStructGEP(Base.getAddress(), idx); 2357 assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs"); 2358 2359 // Make sure that the address is pointing to the right type. This is critical 2360 // for both unions and structs. A union needs a bitcast, a struct element 2361 // will need a bitcast if the LLVM type laid out doesn't match the desired 2362 // type. 2363 llvm::Type *llvmType = ConvertTypeForMem(FieldType); 2364 V = EmitBitCastOfLValueToProperType(*this, V, llvmType, Field->getName()); 2365 2366 CharUnits Alignment = getContext().getDeclAlign(Field); 2367 2368 // FIXME: It should be impossible to have an LValue without alignment for a 2369 // complete type. 2370 if (!Base.getAlignment().isZero()) 2371 Alignment = std::min(Alignment, Base.getAlignment()); 2372 2373 return MakeAddrLValue(V, FieldType, Alignment); 2374} 2375 2376LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){ 2377 if (E->isFileScope()) { 2378 llvm::Value *GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E); 2379 return MakeAddrLValue(GlobalPtr, E->getType()); 2380 } 2381 if (E->getType()->isVariablyModifiedType()) 2382 // make sure to emit the VLA size. 
2383 EmitVariablyModifiedType(E->getType()); 2384 2385 llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral"); 2386 const Expr *InitExpr = E->getInitializer(); 2387 LValue Result = MakeAddrLValue(DeclPtr, E->getType()); 2388 2389 EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(), 2390 /*Init*/ true); 2391 2392 return Result; 2393} 2394 2395LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) { 2396 if (!E->isGLValue()) 2397 // Initializing an aggregate temporary in C++11: T{...}. 2398 return EmitAggExprToLValue(E); 2399 2400 // An lvalue initializer list must be initializing a reference. 2401 assert(E->getNumInits() == 1 && "reference init with multiple values"); 2402 return EmitLValue(E->getInit(0)); 2403} 2404 2405LValue CodeGenFunction:: 2406EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) { 2407 if (!expr->isGLValue()) { 2408 // ?: here should be an aggregate. 2409 assert((hasAggregateLLVMType(expr->getType()) && 2410 !expr->getType()->isAnyComplexType()) && 2411 "Unexpected conditional operator!"); 2412 return EmitAggExprToLValue(expr); 2413 } 2414 2415 OpaqueValueMapping binding(*this, expr); 2416 2417 const Expr *condExpr = expr->getCond(); 2418 bool CondExprBool; 2419 if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) { 2420 const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr(); 2421 if (!CondExprBool) std::swap(live, dead); 2422 2423 if (!ContainsLabel(dead)) 2424 return EmitLValue(live); 2425 } 2426 2427 llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true"); 2428 llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false"); 2429 llvm::BasicBlock *contBlock = createBasicBlock("cond.end"); 2430 2431 ConditionalEvaluation eval(*this); 2432 EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock); 2433 2434 // Any temporaries created here are conditional. 2435 EmitBlock(lhsBlock); 2436 eval.begin(*this); 2437 LValue lhs = EmitLValue(expr->getTrueExpr()); 2438 eval.end(*this); 2439 2440 if (!lhs.isSimple()) 2441 return EmitUnsupportedLValue(expr, "conditional operator"); 2442 2443 lhsBlock = Builder.GetInsertBlock(); 2444 Builder.CreateBr(contBlock); 2445 2446 // Any temporaries created here are conditional. 2447 EmitBlock(rhsBlock); 2448 eval.begin(*this); 2449 LValue rhs = EmitLValue(expr->getFalseExpr()); 2450 eval.end(*this); 2451 if (!rhs.isSimple()) 2452 return EmitUnsupportedLValue(expr, "conditional operator"); 2453 rhsBlock = Builder.GetInsertBlock(); 2454 2455 EmitBlock(contBlock); 2456 2457 llvm::PHINode *phi = Builder.CreatePHI(lhs.getAddress()->getType(), 2, 2458 "cond-lvalue"); 2459 phi->addIncoming(lhs.getAddress(), lhsBlock); 2460 phi->addIncoming(rhs.getAddress(), rhsBlock); 2461 return MakeAddrLValue(phi, expr->getType()); 2462} 2463 2464/// EmitCastLValue - Casts are never lvalues unless that cast is to a reference 2465/// type. If the cast is to a reference, we can have the usual lvalue result, 2466/// otherwise if a cast is needed by the code generator in an lvalue context, 2467/// then it must mean that we need the address of an aggregate in order to 2468/// access one of its members. This can happen for all the reasons that casts 2469/// are permitted with aggregate result, including noop aggregate casts, and 2470/// cast from scalar to union. 
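/// For example (illustrative), the GNU C cast '(union U)x' yields an
/// aggregate lvalue via CK_ToUnion below, while a scalar conversion cast
/// used in reference binding is spilled to a "ref.temp" temporary.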
2471LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { 2472 switch (E->getCastKind()) { 2473 case CK_ToVoid: 2474 return EmitUnsupportedLValue(E, "unexpected cast lvalue"); 2475 2476 case CK_Dependent: 2477 llvm_unreachable("dependent cast kind in IR gen!"); 2478 2479 case CK_BuiltinFnToFnPtr: 2480 llvm_unreachable("builtin functions are handled elsewhere"); 2481 2482 // These two casts are currently treated as no-ops, although they could 2483 // potentially be real operations depending on the target's ABI. 2484 case CK_NonAtomicToAtomic: 2485 case CK_AtomicToNonAtomic: 2486 2487 case CK_NoOp: 2488 case CK_LValueToRValue: 2489 if (!E->getSubExpr()->Classify(getContext()).isPRValue() 2490 || E->getType()->isRecordType()) 2491 return EmitLValue(E->getSubExpr()); 2492 // Fall through to synthesize a temporary. 2493 2494 case CK_BitCast: 2495 case CK_ArrayToPointerDecay: 2496 case CK_FunctionToPointerDecay: 2497 case CK_NullToMemberPointer: 2498 case CK_NullToPointer: 2499 case CK_IntegralToPointer: 2500 case CK_PointerToIntegral: 2501 case CK_PointerToBoolean: 2502 case CK_VectorSplat: 2503 case CK_IntegralCast: 2504 case CK_IntegralToBoolean: 2505 case CK_IntegralToFloating: 2506 case CK_FloatingToIntegral: 2507 case CK_FloatingToBoolean: 2508 case CK_FloatingCast: 2509 case CK_FloatingRealToComplex: 2510 case CK_FloatingComplexToReal: 2511 case CK_FloatingComplexToBoolean: 2512 case CK_FloatingComplexCast: 2513 case CK_FloatingComplexToIntegralComplex: 2514 case CK_IntegralRealToComplex: 2515 case CK_IntegralComplexToReal: 2516 case CK_IntegralComplexToBoolean: 2517 case CK_IntegralComplexCast: 2518 case CK_IntegralComplexToFloatingComplex: 2519 case CK_DerivedToBaseMemberPointer: 2520 case CK_BaseToDerivedMemberPointer: 2521 case CK_MemberPointerToBoolean: 2522 case CK_ReinterpretMemberPointer: 2523 case CK_AnyPointerToBlockPointerCast: 2524 case CK_ARCProduceObject: 2525 case CK_ARCConsumeObject: 2526 case CK_ARCReclaimReturnedObject: 2527 case CK_ARCExtendBlockObject: 2528 case CK_CopyAndAutoreleaseBlockObject: { 2529 // These casts only produce lvalues when we're binding a reference to a 2530 // temporary realized from a (converted) pure rvalue. Emit the expression 2531 // as a value, copy it into a temporary, and return an lvalue referring to 2532 // that temporary. 
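    // E.g. (sketch) binding 'const double &r = i;' for an int i: the
    // CK_IntegralToFloating result is computed, stored into the "ref.temp"
    // alloca created here, and that slot's address is the resulting lvalue.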
2533 llvm::Value *V = CreateMemTemp(E->getType(), "ref.temp"); 2534 EmitAnyExprToMem(E, V, E->getType().getQualifiers(), false); 2535 return MakeAddrLValue(V, E->getType()); 2536 } 2537 2538 case CK_Dynamic: { 2539 LValue LV = EmitLValue(E->getSubExpr()); 2540 llvm::Value *V = LV.getAddress(); 2541 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(E); 2542 return MakeAddrLValue(EmitDynamicCast(V, DCE), E->getType()); 2543 } 2544 2545 case CK_ConstructorConversion: 2546 case CK_UserDefinedConversion: 2547 case CK_CPointerToObjCPointerCast: 2548 case CK_BlockPointerToObjCPointerCast: 2549 return EmitLValue(E->getSubExpr()); 2550 2551 case CK_UncheckedDerivedToBase: 2552 case CK_DerivedToBase: { 2553 const RecordType *DerivedClassTy = 2554 E->getSubExpr()->getType()->getAs<RecordType>(); 2555 CXXRecordDecl *DerivedClassDecl = 2556 cast<CXXRecordDecl>(DerivedClassTy->getDecl()); 2557 2558 LValue LV = EmitLValue(E->getSubExpr()); 2559 llvm::Value *This = LV.getAddress(); 2560 2561 // Perform the derived-to-base conversion 2562 llvm::Value *Base = 2563 GetAddressOfBaseClass(This, DerivedClassDecl, 2564 E->path_begin(), E->path_end(), 2565 /*NullCheckValue=*/false); 2566 2567 return MakeAddrLValue(Base, E->getType()); 2568 } 2569 case CK_ToUnion: 2570 return EmitAggExprToLValue(E); 2571 case CK_BaseToDerived: { 2572 const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>(); 2573 CXXRecordDecl *DerivedClassDecl = 2574 cast<CXXRecordDecl>(DerivedClassTy->getDecl()); 2575 2576 LValue LV = EmitLValue(E->getSubExpr()); 2577 2578 // Perform the base-to-derived conversion 2579 llvm::Value *Derived = 2580 GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl, 2581 E->path_begin(), E->path_end(), 2582 /*NullCheckValue=*/false); 2583 2584 return MakeAddrLValue(Derived, E->getType()); 2585 } 2586 case CK_LValueBitCast: { 2587 // This must be a reinterpret_cast (or c-style equivalent). 
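    // E.g. 'reinterpret_cast<float&>(i)': emit the operand as an lvalue,
    // bitcast its address to 'float*', and keep it as an lvalue of the
    // cast's type.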
2588 const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E); 2589 2590 LValue LV = EmitLValue(E->getSubExpr()); 2591 llvm::Value *V = Builder.CreateBitCast(LV.getAddress(), 2592 ConvertType(CE->getTypeAsWritten())); 2593 return MakeAddrLValue(V, E->getType()); 2594 } 2595 case CK_ObjCObjectLValueCast: { 2596 LValue LV = EmitLValue(E->getSubExpr()); 2597 QualType ToType = getContext().getLValueReferenceType(E->getType()); 2598 llvm::Value *V = Builder.CreateBitCast(LV.getAddress(), 2599 ConvertType(ToType)); 2600 return MakeAddrLValue(V, E->getType()); 2601 } 2602 } 2603 2604 llvm_unreachable("Unhandled lvalue cast kind?"); 2605} 2606 2607LValue CodeGenFunction::EmitNullInitializationLValue( 2608 const CXXScalarValueInitExpr *E) { 2609 QualType Ty = E->getType(); 2610 LValue LV = MakeAddrLValue(CreateMemTemp(Ty), Ty); 2611 EmitNullInitialization(LV.getAddress(), Ty); 2612 return LV; 2613} 2614 2615LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) { 2616 assert(OpaqueValueMappingData::shouldBindAsLValue(e)); 2617 return getOpaqueLValueMapping(e); 2618} 2619 2620LValue CodeGenFunction::EmitMaterializeTemporaryExpr( 2621 const MaterializeTemporaryExpr *E) { 2622 RValue RV = EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0); 2623 return MakeAddrLValue(RV.getScalarVal(), E->getType()); 2624} 2625 2626RValue CodeGenFunction::EmitRValueForField(LValue LV, 2627 const FieldDecl *FD) { 2628 QualType FT = FD->getType(); 2629 LValue FieldLV = EmitLValueForField(LV, FD); 2630 if (FT->isAnyComplexType()) 2631 return RValue::getComplex( 2632 LoadComplexFromAddr(FieldLV.getAddress(), 2633 FieldLV.isVolatileQualified())); 2634 else if (CodeGenFunction::hasAggregateLLVMType(FT)) 2635 return FieldLV.asAggregateRValue(); 2636 2637 return EmitLoadOfLValue(FieldLV); 2638} 2639 2640//===--------------------------------------------------------------------===// 2641// Expression Emission 2642//===--------------------------------------------------------------------===// 2643 2644RValue CodeGenFunction::EmitCallExpr(const CallExpr *E, 2645 ReturnValueSlot ReturnValue) { 2646 if (CGDebugInfo *DI = getDebugInfo()) 2647 DI->EmitLocation(Builder, E->getLocStart()); 2648 2649 // Builtins never have block type. 
2650 if (E->getCallee()->getType()->isBlockPointerType()) 2651 return EmitBlockCallExpr(E, ReturnValue); 2652 2653 if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E)) 2654 return EmitCXXMemberCallExpr(CE, ReturnValue); 2655 2656 if (const CUDAKernelCallExpr *CE = dyn_cast<CUDAKernelCallExpr>(E)) 2657 return EmitCUDAKernelCallExpr(CE, ReturnValue); 2658 2659 const Decl *TargetDecl = E->getCalleeDecl(); 2660 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) { 2661 if (unsigned builtinID = FD->getBuiltinID()) 2662 return EmitBuiltinExpr(FD, builtinID, E); 2663 } 2664 2665 if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E)) 2666 if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl)) 2667 return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue); 2668 2669 if (const CXXPseudoDestructorExpr *PseudoDtor 2670 = dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) { 2671 QualType DestroyedType = PseudoDtor->getDestroyedType(); 2672 if (getLangOpts().ObjCAutoRefCount && 2673 DestroyedType->isObjCLifetimeType() && 2674 (DestroyedType.getObjCLifetime() == Qualifiers::OCL_Strong || 2675 DestroyedType.getObjCLifetime() == Qualifiers::OCL_Weak)) { 2676 // Automatic Reference Counting: 2677 // If the pseudo-expression names a retainable object with weak or 2678 // strong lifetime, the object shall be released. 2679 Expr *BaseExpr = PseudoDtor->getBase(); 2680 llvm::Value *BaseValue = NULL; 2681 Qualifiers BaseQuals; 2682 2683 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar. 2684 if (PseudoDtor->isArrow()) { 2685 BaseValue = EmitScalarExpr(BaseExpr); 2686 const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>(); 2687 BaseQuals = PTy->getPointeeType().getQualifiers(); 2688 } else { 2689 LValue BaseLV = EmitLValue(BaseExpr); 2690 BaseValue = BaseLV.getAddress(); 2691 QualType BaseTy = BaseExpr->getType(); 2692 BaseQuals = BaseTy.getQualifiers(); 2693 } 2694 2695 switch (PseudoDtor->getDestroyedType().getObjCLifetime()) { 2696 case Qualifiers::OCL_None: 2697 case Qualifiers::OCL_ExplicitNone: 2698 case Qualifiers::OCL_Autoreleasing: 2699 break; 2700 2701 case Qualifiers::OCL_Strong: 2702 EmitARCRelease(Builder.CreateLoad(BaseValue, 2703 PseudoDtor->getDestroyedType().isVolatileQualified()), 2704 /*precise*/ true); 2705 break; 2706 2707 case Qualifiers::OCL_Weak: 2708 EmitARCDestroyWeak(BaseValue); 2709 break; 2710 } 2711 } else { 2712 // C++ [expr.pseudo]p1: 2713 // The result shall only be used as the operand for the function call 2714 // operator (), and the result of such a call has type void. The only 2715 // effect is the evaluation of the postfix-expression before the dot or 2716 // arrow. 2717 EmitScalarExpr(E->getCallee()); 2718 } 2719 2720 return RValue::get(0); 2721 } 2722 2723 llvm::Value *Callee = EmitScalarExpr(E->getCallee()); 2724 return EmitCall(E->getCallee()->getType(), Callee, ReturnValue, 2725 E->arg_begin(), E->arg_end(), TargetDecl); 2726} 2727 2728LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) { 2729 // Comma expressions just emit their LHS then their RHS as an l-value. 
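  // E.g. '(f(), x) = 3': f() is emitted for its side effects and the result
  // discarded, then 'x' is emitted as the lvalue that receives the store.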
2730 if (E->getOpcode() == BO_Comma) { 2731 EmitIgnoredExpr(E->getLHS()); 2732 EnsureInsertPoint(); 2733 return EmitLValue(E->getRHS()); 2734 } 2735 2736 if (E->getOpcode() == BO_PtrMemD || 2737 E->getOpcode() == BO_PtrMemI) 2738 return EmitPointerToDataMemberBinaryExpr(E); 2739 2740 assert(E->getOpcode() == BO_Assign && "unexpected binary l-value"); 2741 2742 // Note that in all of these cases, __block variables need the RHS 2743 // evaluated first just in case the variable gets moved by the RHS. 2744 2745 if (!hasAggregateLLVMType(E->getType())) { 2746 switch (E->getLHS()->getType().getObjCLifetime()) { 2747 case Qualifiers::OCL_Strong: 2748 return EmitARCStoreStrong(E, /*ignored*/ false).first; 2749 2750 case Qualifiers::OCL_Autoreleasing: 2751 return EmitARCStoreAutoreleasing(E).first; 2752 2753 // No reason to do any of these differently. 2754 case Qualifiers::OCL_None: 2755 case Qualifiers::OCL_ExplicitNone: 2756 case Qualifiers::OCL_Weak: 2757 break; 2758 } 2759 2760 RValue RV = EmitAnyExpr(E->getRHS()); 2761 LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store); 2762 EmitStoreThroughLValue(RV, LV); 2763 return LV; 2764 } 2765 2766 if (E->getType()->isAnyComplexType()) 2767 return EmitComplexAssignmentLValue(E); 2768 2769 return EmitAggExprToLValue(E); 2770} 2771 2772LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) { 2773 RValue RV = EmitCallExpr(E); 2774 2775 if (!RV.isScalar()) 2776 return MakeAddrLValue(RV.getAggregateAddr(), E->getType()); 2777 2778 assert(E->getCallReturnType()->isReferenceType() && 2779 "Can't have a scalar return unless the return type is a " 2780 "reference type!"); 2781 2782 return MakeAddrLValue(RV.getScalarVal(), E->getType()); 2783} 2784 2785LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) { 2786 // FIXME: This shouldn't require another copy. 
2787 return EmitAggExprToLValue(E); 2788} 2789 2790LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) { 2791 assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor() 2792 && "binding l-value to type which needs a temporary"); 2793 AggValueSlot Slot = CreateAggTemp(E->getType()); 2794 EmitCXXConstructExpr(E, Slot); 2795 return MakeAddrLValue(Slot.getAddr(), E->getType()); 2796} 2797 2798LValue 2799CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) { 2800 return MakeAddrLValue(EmitCXXTypeidExpr(E), E->getType()); 2801} 2802 2803llvm::Value *CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) { 2804 return CGM.GetAddrOfUuidDescriptor(E); 2805} 2806 2807LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) { 2808 return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType()); 2809} 2810 2811LValue 2812CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) { 2813 AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue"); 2814 Slot.setExternallyDestructed(); 2815 EmitAggExpr(E->getSubExpr(), Slot); 2816 EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddr()); 2817 return MakeAddrLValue(Slot.getAddr(), E->getType()); 2818} 2819 2820LValue 2821CodeGenFunction::EmitLambdaLValue(const LambdaExpr *E) { 2822 AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue"); 2823 EmitLambdaExpr(E, Slot); 2824 return MakeAddrLValue(Slot.getAddr(), E->getType()); 2825} 2826 2827LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) { 2828 RValue RV = EmitObjCMessageExpr(E); 2829 2830 if (!RV.isScalar()) 2831 return MakeAddrLValue(RV.getAggregateAddr(), E->getType()); 2832 2833 assert(E->getMethodDecl()->getResultType()->isReferenceType() && 2834 "Can't have a scalar return unless the return type is a " 2835 "reference type!"); 2836 2837 return MakeAddrLValue(RV.getScalarVal(), E->getType()); 2838} 2839 2840LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) { 2841 llvm::Value *V = 2842 CGM.getObjCRuntime().GetSelector(Builder, E->getSelector(), true); 2843 return MakeAddrLValue(V, E->getType()); 2844} 2845 2846llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface, 2847 const ObjCIvarDecl *Ivar) { 2848 return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar); 2849} 2850 2851LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy, 2852 llvm::Value *BaseValue, 2853 const ObjCIvarDecl *Ivar, 2854 unsigned CVRQualifiers) { 2855 return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue, 2856 Ivar, CVRQualifiers); 2857} 2858 2859LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) { 2860 // FIXME: A lot of the code below could be shared with EmitMemberExpr. 2861 llvm::Value *BaseValue = 0; 2862 const Expr *BaseExpr = E->getBase(); 2863 Qualifiers BaseQuals; 2864 QualType ObjectTy; 2865 if (E->isArrow()) { 2866 BaseValue = EmitScalarExpr(BaseExpr); 2867 ObjectTy = BaseExpr->getType()->getPointeeType(); 2868 BaseQuals = ObjectTy.getQualifiers(); 2869 } else { 2870 LValue BaseLV = EmitLValue(BaseExpr); 2871 // FIXME: this isn't right for bitfields. 
2872 BaseValue = BaseLV.getAddress(); 2873 ObjectTy = BaseExpr->getType(); 2874 BaseQuals = ObjectTy.getQualifiers(); 2875 } 2876 2877 LValue LV = 2878 EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(), 2879 BaseQuals.getCVRQualifiers()); 2880 setObjCGCLValueClass(getContext(), E, LV); 2881 return LV; 2882} 2883 2884LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) { 2885 // Can only get l-value for message expression returning aggregate type 2886 RValue RV = EmitAnyExprToTemp(E); 2887 return MakeAddrLValue(RV.getAggregateAddr(), E->getType()); 2888} 2889 2890RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee, 2891 ReturnValueSlot ReturnValue, 2892 CallExpr::const_arg_iterator ArgBeg, 2893 CallExpr::const_arg_iterator ArgEnd, 2894 const Decl *TargetDecl) { 2895 // Get the actual function type. The callee type will always be a pointer to 2896 // function type or a block pointer type. 2897 assert(CalleeType->isFunctionPointerType() && 2898 "Call must have function pointer type!"); 2899 2900 CalleeType = getContext().getCanonicalType(CalleeType); 2901 2902 const FunctionType *FnType 2903 = cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType()); 2904 2905 CallArgList Args; 2906 EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd); 2907 2908 const CGFunctionInfo &FnInfo = 2909 CGM.getTypes().arrangeFreeFunctionCall(Args, FnType); 2910 2911 // C99 6.5.2.2p6: 2912 // If the expression that denotes the called function has a type 2913 // that does not include a prototype, [the default argument 2914 // promotions are performed]. If the number of arguments does not 2915 // equal the number of parameters, the behavior is undefined. If 2916 // the function is defined with a type that includes a prototype, 2917 // and either the prototype ends with an ellipsis (, ...) or the 2918 // types of the arguments after promotion are not compatible with 2919 // the types of the parameters, the behavior is undefined. If the 2920 // function is defined with a type that does not include a 2921 // prototype, and the types of the arguments after promotion are 2922 // not compatible with those of the parameters after promotion, 2923 // the behavior is undefined [except in some trivial cases]. 2924 // That is, in the general case, we should assume that a call 2925 // through an unprototyped function type works like a *non-variadic* 2926 // call. The way we make this work is to cast to the exact type 2927 // of the promoted arguments. 
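  // E.g. (sketch) 'int f(); ... f(1, 2.0);': the callee is bitcast below
  // ("callee.knr.cast") to a pointer to the function type formed from the
  // promoted argument types, and the call is emitted as non-variadic.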
2928 if (isa<FunctionNoProtoType>(FnType)) { 2929 llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo); 2930 CalleeTy = CalleeTy->getPointerTo(); 2931 Callee = Builder.CreateBitCast(Callee, CalleeTy, "callee.knr.cast"); 2932 } 2933 2934 return EmitCall(FnInfo, Callee, ReturnValue, Args, TargetDecl); 2935} 2936 2937LValue CodeGenFunction:: 2938EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) { 2939 llvm::Value *BaseV; 2940 if (E->getOpcode() == BO_PtrMemI) 2941 BaseV = EmitScalarExpr(E->getLHS()); 2942 else 2943 BaseV = EmitLValue(E->getLHS()).getAddress(); 2944 2945 llvm::Value *OffsetV = EmitScalarExpr(E->getRHS()); 2946 2947 const MemberPointerType *MPT 2948 = E->getRHS()->getType()->getAs<MemberPointerType>(); 2949 2950 llvm::Value *AddV = 2951 CGM.getCXXABI().EmitMemberDataPointerAddress(*this, BaseV, OffsetV, MPT); 2952 2953 return MakeAddrLValue(AddV, MPT->getPointeeType()); 2954} 2955 2956static void 2957EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest, 2958 llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2, 2959 uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) { 2960 llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add; 2961 llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0; 2962 2963 switch (E->getOp()) { 2964 case AtomicExpr::AO__c11_atomic_init: 2965 llvm_unreachable("Already handled!"); 2966 2967 case AtomicExpr::AO__c11_atomic_compare_exchange_strong: 2968 case AtomicExpr::AO__c11_atomic_compare_exchange_weak: 2969 case AtomicExpr::AO__atomic_compare_exchange: 2970 case AtomicExpr::AO__atomic_compare_exchange_n: { 2971 // Note that cmpxchg only supports specifying one ordering and 2972 // doesn't support weak cmpxchg, at least at the moment. 2973 llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1); 2974 LoadVal1->setAlignment(Align); 2975 llvm::LoadInst *LoadVal2 = CGF.Builder.CreateLoad(Val2); 2976 LoadVal2->setAlignment(Align); 2977 llvm::AtomicCmpXchgInst *CXI = 2978 CGF.Builder.CreateAtomicCmpXchg(Ptr, LoadVal1, LoadVal2, Order); 2979 CXI->setVolatile(E->isVolatile()); 2980 llvm::StoreInst *StoreVal1 = CGF.Builder.CreateStore(CXI, Val1); 2981 StoreVal1->setAlignment(Align); 2982 llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(CXI, LoadVal1); 2983 CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType())); 2984 return; 2985 } 2986 2987 case AtomicExpr::AO__c11_atomic_load: 2988 case AtomicExpr::AO__atomic_load_n: 2989 case AtomicExpr::AO__atomic_load: { 2990 llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr); 2991 Load->setAtomic(Order); 2992 Load->setAlignment(Size); 2993 Load->setVolatile(E->isVolatile()); 2994 llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest); 2995 StoreDest->setAlignment(Align); 2996 return; 2997 } 2998 2999 case AtomicExpr::AO__c11_atomic_store: 3000 case AtomicExpr::AO__atomic_store: 3001 case AtomicExpr::AO__atomic_store_n: { 3002 assert(!Dest && "Store does not return a value"); 3003 llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1); 3004 LoadVal1->setAlignment(Align); 3005 llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr); 3006 Store->setAtomic(Order); 3007 Store->setAlignment(Size); 3008 Store->setVolatile(E->isVolatile()); 3009 return; 3010 } 3011 3012 case AtomicExpr::AO__c11_atomic_exchange: 3013 case AtomicExpr::AO__atomic_exchange_n: 3014 case AtomicExpr::AO__atomic_exchange: 3015 Op = llvm::AtomicRMWInst::Xchg; 3016 break; 3017 3018 case AtomicExpr::AO__atomic_add_fetch: 3019 PostOp = llvm::Instruction::Add; 3020 // Fall 
LValue CodeGenFunction::
EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
  llvm::Value *BaseV;
  if (E->getOpcode() == BO_PtrMemI)
    BaseV = EmitScalarExpr(E->getLHS());
  else
    BaseV = EmitLValue(E->getLHS()).getAddress();

  llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());

  const MemberPointerType *MPT
    = E->getRHS()->getType()->getAs<MemberPointerType>();

  llvm::Value *AddV =
    CGM.getCXXABI().EmitMemberDataPointerAddress(*this, BaseV, OffsetV, MPT);

  return MakeAddrLValue(AddV, MPT->getPointeeType());
}

static void
EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
             llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
             uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    // Note that cmpxchg only supports specifying one ordering and
    // doesn't support weak cmpxchg, at least at the moment.
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::LoadInst *LoadVal2 = CGF.Builder.CreateLoad(Val2);
    LoadVal2->setAlignment(Align);
    llvm::AtomicCmpXchgInst *CXI =
      CGF.Builder.CreateAtomicCmpXchg(Ptr, LoadVal1, LoadVal2, Order);
    CXI->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreVal1 = CGF.Builder.CreateStore(CXI, Val1);
    StoreVal1->setAlignment(Align);
    llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(CXI, LoadVal1);
    CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
    return;
  }

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    Load->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
    CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}

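// Note on the PostOp recovery above (an illustrative sketch): atomicrmw
// returns the *old* value, but the __atomic_*_fetch forms must return the
// *new* one. For
//   __atomic_add_fetch(&x, n, __ATOMIC_SEQ_CST);
// the emitted IR is roughly
//   %old = atomicrmw add i32* %x, i32 %n seq_cst
//   %new = add i32 %old, %n
// and __atomic_nand_fetch additionally applies a 'not' to recover
// ~(%old & %n).
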
// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static llvm::Value *
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

static RValue ConvertTempToRValue(CodeGenFunction &CGF, QualType Ty,
                                  llvm::Value *Dest) {
  if (Ty->isAnyComplexType())
    return RValue::getComplex(CGF.LoadComplexFromAddr(Dest, false));
  if (CGF.hasAggregateLLVMType(Ty))
    return RValue::getAggregate(Dest);
  return RValue::get(CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(Dest, Ty)));
}

RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits =
    getContext().getTargetInfo().getMaxAtomicInlineWidth();
  bool UseLibcall = (Size != Align ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);
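  // For instance (an illustrative sketch): on a target whose maximum atomic
  // inline width is 64 bits, a 16-byte _Atomic struct takes the libcall
  // path, as does any type whose size and alignment differ, since the
  // inline lowering below reinterprets the object as a single integer of
  // Size * 8 bits.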

  llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
  Ptr = EmitScalarExpr(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    if (!hasAggregateLLVMType(E->getVal1()->getType())) {
      QualType PointeeType
        = E->getPtr()->getType()->getAs<PointerType>()->getPointeeType();
      EmitScalarInit(EmitScalarExpr(E->getVal1()),
                     LValue::MakeAddr(Ptr, PointeeType, alignChars,
                                      getContext()));
    } else if (E->getType()->isAnyComplexType()) {
      EmitComplexExprIntoAddr(E->getVal1(), Ptr, E->isVolatile());
    } else {
      AggValueSlot Slot = AggValueSlot::forAddr(Ptr, alignChars,
                                          AtomicTy.getQualifiers(),
                                          AggValueSlot::IsNotDestructed,
                                          AggValueSlot::DoesNotNeedGCBarriers,
                                          AggValueSlot::IsNotAliased);
      EmitAggExpr(E->getVal1(), Slot);
    }
    return RValue::get(0);
  }

  Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    // Evaluate and discard the 'weak' argument.
    if (E->getNumSubExprs() == 6)
      EmitScalarExpr(E->getWeak());
    break;

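  // For instance (an illustrative sketch), given '_Atomic(int *) p':
  //   __c11_atomic_fetch_add(&p, 2, memory_order_relaxed)
  // must advance p by 2 * sizeof(int) bytes, so Val1 is rescaled below,
  // whereas the GNU builtin __atomic_fetch_add(&p, 2, ...) adds exactly
  // 2 bytes.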
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
        getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  if (!E->getType()->isVoidType() && !Dest)
    Dest = CreateMemTemp(E->getType(), ".atomicdst");

  // Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {

    llvm::SmallVector<QualType, 5> Params;
    CallArgList Args;
    // Size is always the first parameter
    Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
             getContext().getSizeType());
    // Atomic address is always the second parameter
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)),
             getContext().VoidPtrTy);

    const char* LibCallName;
    QualType RetTy = getContext().VoidTy;
    switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is
    // no optimisation benefit possible from a libcall version of a weak
    // compare and exchange.
    // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
    //                                void *desired, int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
               getContext().VoidPtrTy);
      Args.add(RValue::get(EmitCastToVoidPtr(Val2)),
               getContext().VoidPtrTy);
      Args.add(RValue::get(Order),
               getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
               getContext().VoidPtrTy);
      Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
               getContext().VoidPtrTy);
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
               getContext().VoidPtrTy);
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
               getContext().VoidPtrTy);
      break;
#if 0
    // These are only defined for 1-16 byte integers. It is not clear what
    // their semantics would be on anything else...
    case AtomicExpr::Add: LibCallName = "__atomic_fetch_add_generic"; break;
    case AtomicExpr::Sub: LibCallName = "__atomic_fetch_sub_generic"; break;
    case AtomicExpr::And: LibCallName = "__atomic_fetch_and_generic"; break;
    case AtomicExpr::Or:  LibCallName = "__atomic_fetch_or_generic"; break;
    case AtomicExpr::Xor: LibCallName = "__atomic_fetch_xor_generic"; break;
#endif
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }
    // order is always the last parameter
    Args.add(RValue::get(Order),
             getContext().IntTy);

    const CGFunctionInfo &FuncInfo =
      CGM.getTypes().arrangeFreeFunctionCall(RetTy, Args,
                                             FunctionType::ExtInfo(),
                                             RequiredArgs::All);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
    if (E->isCmpXChg())
      return Res;
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return ConvertTempToRValue(*this, E->getType(), Dest);
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

  llvm::Type *IPtrTy =
    llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
  llvm::Value *OrigDest = Dest;
  Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
  if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
  if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
  if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);

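  // If the ordering is a compile-time constant (the common case), emit the
  // single matching instruction directly; e.g. (an illustrative sketch)
  //   __c11_atomic_store(&x, 1, memory_order_release);
  // lowers to a lone 'store atomic ... release' with no dispatch code.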
  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case 0:  // memory_order_relaxed
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Monotonic);
      break;
    case 1:  // memory_order_consume
    case 2:  // memory_order_acquire
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Acquire);
      break;
    case 3:  // memory_order_release
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Release);
      break;
    case 4:  // memory_order_acq_rel
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::AcquireRelease);
      break;
    case 5:  // memory_order_seq_cst
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return ConvertTempToRValue(*this, E->getType(), OrigDest);
  }

  // Long case, when Order isn't obviously constant.
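  // That happens when the ordering is only known at run time, e.g.
  // (an illustrative sketch):
  //   void store1(_Atomic(int) *p, memory_order mo) {
  //     __c11_atomic_store(p, 1, mo);
  //   }
  // Below we switch on the ordering value and emit one copy of the
  // operation per reachable ordering.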

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0,
                   *AcqRelBB = 0, *SeqCstBB = 0;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
               llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(1), AcquireBB);
    SI->addCase(Builder.getInt32(2), AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(3), ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(4), AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
               llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(5), SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (E->getType()->isVoidType())
    return RValue::get(0);
  return ConvertTempToRValue(*this, E->getType(), OrigDest);
}

void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
  assert(Val->getType()->isFPOrFPVectorTy());
  if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
    return;

  llvm::MDBuilder MDHelper(getLLVMContext());
  llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);

  cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
}

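// An illustrative sketch of SetFPAccuracy: attaching an accuracy of 2.5
// ULPs to a float division yields IR along the lines of
//   %div = fdiv float %a, %b, !fpmath !0
//   !0 = metadata !{float 2.500000e+00}
// which permits the backend to substitute a faster, less precise division.
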
namespace {
  struct LValueOrRValue {
    LValue LV;
    RValue RV;
  };
}

static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
                                           const PseudoObjectExpr *E,
                                           bool forLValue,
                                           AggValueSlot slot) {
  llvm::SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;

  // Find the result expression, if any.
  const Expr *resultExpr = E->getResultExpr();
  LValueOrRValue result;

  for (PseudoObjectExpr::const_semantics_iterator
         i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
    const Expr *semantic = *i;

    // If this semantic expression is an opaque value, bind it
    // to the result of its source expression.
    if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) {

      // If this is the result expression, we may need to evaluate
      // directly into the slot.
      typedef CodeGenFunction::OpaqueValueMappingData OVMA;
      OVMA opaqueData;
      if (ov == resultExpr && ov->isRValue() && !forLValue &&
          CodeGenFunction::hasAggregateLLVMType(ov->getType()) &&
          !ov->getType()->isAnyComplexType()) {
        CGF.EmitAggExpr(ov->getSourceExpr(), slot);

        LValue LV = CGF.MakeAddrLValue(slot.getAddr(), ov->getType());
        opaqueData = OVMA::bind(CGF, ov, LV);
        result.RV = slot.asRValue();

      // Otherwise, emit as normal.
      } else {
        opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());

        // If this is the result, also evaluate the result now.
        if (ov == resultExpr) {
          if (forLValue)
            result.LV = CGF.EmitLValue(ov);
          else
            result.RV = CGF.EmitAnyExpr(ov, slot);
        }
      }

      opaques.push_back(opaqueData);

    // Otherwise, if the expression is the result, evaluate it
    // and remember the result.
    } else if (semantic == resultExpr) {
      if (forLValue)
        result.LV = CGF.EmitLValue(semantic);
      else
        result.RV = CGF.EmitAnyExpr(semantic, slot);

    // Otherwise, evaluate the expression in an ignored context.
    } else {
      CGF.EmitIgnoredExpr(semantic);
    }
  }

  // Unbind all the opaques now.
  for (unsigned i = 0, e = opaques.size(); i != e; ++i)
    opaques[i].unbind(CGF);

  return result;
}

RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
                                               AggValueSlot slot) {
  return emitPseudoObjectExpr(*this, E, false, slot).RV;
}

LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
  return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
}