CGExpr.cpp revision 463b48ba7eab6d7c96d23b59caea7f25de548293
//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/ConvertUTF.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/DataLayout.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/MDBuilder.h"
using namespace clang;
using namespace CodeGen;

//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
  unsigned addressSpace =
    cast<llvm::PointerType>(value->getType())->getAddressSpace();

  llvm::PointerType *destType = Int8PtrTy;
  if (addressSpace)
    destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);

  if (value->getType() == destType) return value;
  return Builder.CreateBitCast(value, destType);
}
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
                                                    const Twine &Name) {
  if (!Builder.isNamePreserving())
    return new llvm::AllocaInst(Ty, 0, "", AllocaInsertPt);
  return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
}

void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var,
                                     llvm::Value *Init) {
  llvm::StoreInst *Store = new llvm::StoreInst(Init, Var);
  llvm::BasicBlock *Block = AllocaInsertPt->getParent();
  Block->getInstList().insertAfter(&*AllocaInsertPt, Store);
}

llvm::AllocaInst *CodeGenFunction::CreateIRTemp(QualType Ty,
                                                const Twine &Name) {
  llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
  // FIXME: Should we prefer the preferred type alignment here?
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  Alloc->setAlignment(Align.getQuantity());
  return Alloc;
}

llvm::AllocaInst *CodeGenFunction::CreateMemTemp(QualType Ty,
                                                 const Twine &Name) {
  llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
  // FIXME: Should we prefer the preferred type alignment here?
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  Alloc->setAlignment(Align.getQuantity());
  return Alloc;
}
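
// CreateIRTemp and CreateMemTemp differ only in the type they allocate:
// ConvertType produces a type's "register" lowering, ConvertTypeForMem its
// in-memory lowering. The two differ for types such as bool, which is i1 in
// registers but i8 in memory (see EmitToMemory/EmitFromMemory below).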

/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
    llvm::Value *MemPtr = EmitScalarExpr(E);
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
  }

  QualType BoolTy = getContext().BoolTy;
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),BoolTy);
}

/// EmitIgnoredExpr - Emit code to compute the specified expression,
/// ignoring the result.
void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
  if (E->isRValue())
    return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);

  // Just emit it as an l-value and drop the result.
  EmitLValue(E);
}

/// EmitAnyExpr - Emit code to compute the specified expression which
/// can have any type.  The result is returned as an RValue struct.
/// If this is an aggregate expression, AggSlot indicates where the
/// result should be returned.
RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
                                    AggValueSlot aggSlot,
                                    bool ignoreResult) {
  if (!hasAggregateLLVMType(E->getType()))
    return RValue::get(EmitScalarExpr(E, ignoreResult));
  else if (E->getType()->isAnyComplexType())
    return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));

  if (!ignoreResult && aggSlot.isIgnored())
    aggSlot = CreateAggTemp(E->getType(), "agg-temp");
  EmitAggExpr(E, aggSlot);
  return aggSlot.asRValue();
}

/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), but the result will
/// always be accessible even if no aggregate location is provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
  AggValueSlot AggSlot = AggValueSlot::ignored();

  if (hasAggregateLLVMType(E->getType()) &&
      !E->getType()->isAnyComplexType())
    AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
  return EmitAnyExpr(E, AggSlot);
}

/// EmitAnyExprToMem - Evaluate an expression into a given memory
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
                                       llvm::Value *Location,
                                       Qualifiers Quals,
                                       bool IsInit) {
  // FIXME: This function should take an LValue as an argument.
  if (E->getType()->isAnyComplexType()) {
    EmitComplexExprIntoAddr(E, Location, Quals.hasVolatile());
  } else if (hasAggregateLLVMType(E->getType())) {
    CharUnits Alignment = getContext().getTypeAlignInChars(E->getType());
    EmitAggExpr(E, AggValueSlot::forAddr(Location, Alignment, Quals,
                                         AggValueSlot::IsDestructed_t(IsInit),
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsAliased_t(!IsInit)));
  } else {
    RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
    LValue LV = MakeAddrLValue(Location, E->getType());
    EmitStoreThroughLValue(RV, LV);
  }
}

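// When the initialized declaration is a global reference, e.g.
//   static const int &r = 42;
// the temporary is emitted below as an internal-linkage global variable whose
// name comes from mangleReferenceTemporary (the Itanium ABI mangles these as
// "_ZGR<encoding>"); local references just get an ordinary "ref.tmp" alloca.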
static llvm::Value *
CreateReferenceTemporary(CodeGenFunction &CGF, QualType Type,
                         const NamedDecl *InitializedDecl) {
  if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
    if (VD->hasGlobalStorage()) {
      SmallString<256> Name;
      llvm::raw_svector_ostream Out(Name);
      CGF.CGM.getCXXABI().getMangleContext().mangleReferenceTemporary(VD, Out);
      Out.flush();

      llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type);

      // Create the reference temporary.
      llvm::GlobalValue *RefTemp =
        new llvm::GlobalVariable(CGF.CGM.getModule(),
                                 RefTempTy, /*isConstant=*/false,
                                 llvm::GlobalValue::InternalLinkage,
                                 llvm::Constant::getNullValue(RefTempTy),
                                 Name.str());
      return RefTemp;
    }
  }

  return CGF.CreateMemTemp(Type, "ref.tmp");
}

static llvm::Value *
EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
                            llvm::Value *&ReferenceTemporary,
                            const CXXDestructorDecl *&ReferenceTemporaryDtor,
                            QualType &ObjCARCReferenceLifetimeType,
                            const NamedDecl *InitializedDecl) {
  const MaterializeTemporaryExpr *M = NULL;
  E = E->findMaterializedTemporary(M);
  // Objective-C++ ARC:
  //   If we are binding a reference to a temporary that has ownership, we
  //   need to perform retain/release operations on the temporary.
  if (M && CGF.getLangOpts().ObjCAutoRefCount &&
      M->getType()->isObjCLifetimeType() &&
      (M->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
       M->getType().getObjCLifetime() == Qualifiers::OCL_Weak ||
       M->getType().getObjCLifetime() == Qualifiers::OCL_Autoreleasing))
    ObjCARCReferenceLifetimeType = M->getType();

  if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(E)) {
    CGF.enterFullExpression(EWC);
    CodeGenFunction::RunCleanupsScope Scope(CGF);

    return EmitExprForReferenceBinding(CGF, EWC->getSubExpr(),
                                       ReferenceTemporary,
                                       ReferenceTemporaryDtor,
                                       ObjCARCReferenceLifetimeType,
                                       InitializedDecl);
  }

  RValue RV;
  if (E->isGLValue()) {
    // Emit the expression as an lvalue.
    LValue LV = CGF.EmitLValue(E);

    if (LV.isSimple())
      return LV.getAddress();

    // We have to load the lvalue.
    RV = CGF.EmitLoadOfLValue(LV);
  } else {
    if (!ObjCARCReferenceLifetimeType.isNull()) {
      ReferenceTemporary = CreateReferenceTemporary(CGF,
                                                  ObjCARCReferenceLifetimeType,
                                                    InitializedDecl);

      LValue RefTempDst = CGF.MakeAddrLValue(ReferenceTemporary,
                                             ObjCARCReferenceLifetimeType);

      CGF.EmitScalarInit(E, dyn_cast_or_null<ValueDecl>(InitializedDecl),
                         RefTempDst, false);

      bool ExtendsLifeOfTemporary = false;
      if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
        if (Var->extendsLifetimeOfTemporary())
          ExtendsLifeOfTemporary = true;
      } else if (InitializedDecl && isa<FieldDecl>(InitializedDecl)) {
        ExtendsLifeOfTemporary = true;
      }

      if (!ExtendsLifeOfTemporary) {
        // Since the lifetime of this temporary isn't going to be extended,
        // we need to clean it up ourselves at the end of the full expression.
        switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
        case Qualifiers::OCL_None:
        case Qualifiers::OCL_ExplicitNone:
        case Qualifiers::OCL_Autoreleasing:
          break;

        case Qualifiers::OCL_Strong: {
          assert(!ObjCARCReferenceLifetimeType->isArrayType());
          CleanupKind cleanupKind = CGF.getARCCleanupKind();
          CGF.pushDestroy(cleanupKind,
                          ReferenceTemporary,
                          ObjCARCReferenceLifetimeType,
                          CodeGenFunction::destroyARCStrongImprecise,
                          cleanupKind & EHCleanup);
          break;
        }

        case Qualifiers::OCL_Weak:
          assert(!ObjCARCReferenceLifetimeType->isArrayType());
          CGF.pushDestroy(NormalAndEHCleanup,
                          ReferenceTemporary,
                          ObjCARCReferenceLifetimeType,
                          CodeGenFunction::destroyARCWeak,
                          /*useEHCleanupForArray*/ true);
          break;
        }

        ObjCARCReferenceLifetimeType = QualType();
      }

      return ReferenceTemporary;
    }

    SmallVector<SubobjectAdjustment, 2> Adjustments;
    E = E->skipRValueSubobjectAdjustments(Adjustments);
    if (const OpaqueValueExpr *opaque = dyn_cast<OpaqueValueExpr>(E))
      if (opaque->getType()->isRecordType())
        return CGF.EmitOpaqueValueLValue(opaque).getAddress();

    // Create a reference temporary if necessary.
    AggValueSlot AggSlot = AggValueSlot::ignored();
    if (CGF.hasAggregateLLVMType(E->getType()) &&
        !E->getType()->isAnyComplexType()) {
      ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
                                                    InitializedDecl);
      CharUnits Alignment = CGF.getContext().getTypeAlignInChars(E->getType());
      AggValueSlot::IsDestructed_t isDestructed
        = AggValueSlot::IsDestructed_t(InitializedDecl != 0);
      AggSlot = AggValueSlot::forAddr(ReferenceTemporary, Alignment,
                                      Qualifiers(), isDestructed,
                                      AggValueSlot::DoesNotNeedGCBarriers,
                                      AggValueSlot::IsNotAliased);
    }

    if (InitializedDecl) {
      // Get the destructor for the reference temporary.
      if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
        CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
        if (!ClassDecl->hasTrivialDestructor())
          ReferenceTemporaryDtor = ClassDecl->getDestructor();
      }
    }

    RV = CGF.EmitAnyExpr(E, AggSlot);

    // Check if we need to perform derived-to-base casts and/or field
    // accesses, to get from the temporary object we created (and, potentially,
    // for which we extended the lifetime) to the subobject we're binding the
    // reference to.
    if (!Adjustments.empty()) {
      llvm::Value *Object = RV.getAggregateAddr();
      for (unsigned I = Adjustments.size(); I != 0; --I) {
        SubobjectAdjustment &Adjustment = Adjustments[I-1];
        switch (Adjustment.Kind) {
        case SubobjectAdjustment::DerivedToBaseAdjustment:
          Object =
              CGF.GetAddressOfBaseClass(Object,
                                        Adjustment.DerivedToBase.DerivedClass,
                              Adjustment.DerivedToBase.BasePath->path_begin(),
                              Adjustment.DerivedToBase.BasePath->path_end(),
                                        /*NullCheckValue=*/false);
          break;

        case SubobjectAdjustment::FieldAdjustment: {
          LValue LV = CGF.MakeAddrLValue(Object, E->getType());
          LV = CGF.EmitLValueForField(LV, Adjustment.Field);
          if (LV.isSimple()) {
            Object = LV.getAddress();
            break;
          }

          // For non-simple lvalues, we actually have to create a copy of
          // the object we're binding to.
          QualType T = Adjustment.Field->getType().getNonReferenceType()
                                                  .getUnqualifiedType();
          Object = CreateReferenceTemporary(CGF, T, InitializedDecl);
          LValue TempLV = CGF.MakeAddrLValue(Object,
                                             Adjustment.Field->getType());
          CGF.EmitStoreThroughLValue(CGF.EmitLoadOfLValue(LV), TempLV);
          break;
        }

        case SubobjectAdjustment::MemberPointerAdjustment: {
          llvm::Value *Ptr = CGF.EmitScalarExpr(Adjustment.Ptr.RHS);
          Object = CGF.CGM.getCXXABI().EmitMemberDataPointerAddress(
                        CGF, Object, Ptr, Adjustment.Ptr.MPT);
          break;
        }
        }
      }

      return Object;
    }
  }

  if (RV.isAggregate())
    return RV.getAggregateAddr();

  // Create a temporary variable that we can bind the reference to.
  ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
                                                InitializedDecl);

  unsigned Alignment =
    CGF.getContext().getTypeAlignInChars(E->getType()).getQuantity();
  if (RV.isScalar())
    CGF.EmitStoreOfScalar(RV.getScalarVal(), ReferenceTemporary,
                          /*Volatile=*/false, Alignment, E->getType());
  else
    CGF.StoreComplexToAddr(RV.getComplexVal(), ReferenceTemporary,
                           /*Volatile=*/false);
  return ReferenceTemporary;
}

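// Illustrative example: for
//   const Foo &r = makeFoo();   // Foo with a non-trivial destructor
// the call result is materialized into the temporary created above, and
// EmitReferenceBindingToExpr below schedules ~Foo either as a registered
// global destructor (static storage) or as a normal scope cleanup.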
RValue
CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
                                            const NamedDecl *InitializedDecl) {
  llvm::Value *ReferenceTemporary = 0;
  const CXXDestructorDecl *ReferenceTemporaryDtor = 0;
  QualType ObjCARCReferenceLifetimeType;
  llvm::Value *Value = EmitExprForReferenceBinding(*this, E, ReferenceTemporary,
                                                   ReferenceTemporaryDtor,
                                                   ObjCARCReferenceLifetimeType,
                                                   InitializedDecl);
  if (SanitizePerformTypeCheck && !E->getType()->isFunctionType()) {
    // C++11 [dcl.ref]p5 (as amended by core issue 453):
    //   If a glvalue to which a reference is directly bound designates neither
    //   an existing object or function of an appropriate type nor a region of
    //   storage of suitable size and alignment to contain an object of the
    //   reference's type, the behavior is undefined.
    QualType Ty = E->getType();
    EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);
  }
  if (!ReferenceTemporaryDtor && ObjCARCReferenceLifetimeType.isNull())
    return RValue::get(Value);

  // Make sure to call the destructor for the reference temporary.
  const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl);
  if (VD && VD->hasGlobalStorage()) {
    if (ReferenceTemporaryDtor) {
      llvm::Constant *DtorFn =
        CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
      CGM.getCXXABI().registerGlobalDtor(*this, DtorFn,
                                    cast<llvm::Constant>(ReferenceTemporary));
    } else {
      assert(!ObjCARCReferenceLifetimeType.isNull());
      // Note: We intentionally do not register a global "destructor" to
      // release the object.
    }

    return RValue::get(Value);
  }

  if (ReferenceTemporaryDtor)
    PushDestructorCleanup(ReferenceTemporaryDtor, ReferenceTemporary);
  else {
    switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
      llvm_unreachable(
                      "Not a reference temporary that needs to be deallocated");
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      // Nothing to do.
      break;

    case Qualifiers::OCL_Strong: {
      bool precise = VD && VD->hasAttr<ObjCPreciseLifetimeAttr>();
      CleanupKind cleanupKind = getARCCleanupKind();
      pushDestroy(cleanupKind, ReferenceTemporary, ObjCARCReferenceLifetimeType,
                  precise ? destroyARCStrongPrecise : destroyARCStrongImprecise,
                  cleanupKind & EHCleanup);
      break;
    }

    case Qualifiers::OCL_Weak: {
      // __weak objects always get EH cleanups; otherwise, exceptions
      // could cause really nasty crashes instead of mere leaks.
      pushDestroy(NormalAndEHCleanup, ReferenceTemporary,
                  ObjCARCReferenceLifetimeType, destroyARCWeak, true);
      break;
    }
    }
  }

  return RValue::get(Value);
}

/// getAccessedFieldNo - Given an encoded value and a result number, return the
/// input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
      ->getZExtValue();
}

/// Emit the hash_16_bytes function from include/llvm/ADT/Hashing.h.
static llvm::Value *emitHash16Bytes(CGBuilderTy &Builder, llvm::Value *Low,
                                    llvm::Value *High) {
  llvm::Value *KMul = Builder.getInt64(0x9ddfea08eb382d69ULL);
  llvm::Value *K47 = Builder.getInt64(47);
  llvm::Value *A0 = Builder.CreateMul(Builder.CreateXor(Low, High), KMul);
  llvm::Value *A1 = Builder.CreateXor(Builder.CreateLShr(A0, K47), A0);
  llvm::Value *B0 = Builder.CreateMul(Builder.CreateXor(High, A1), KMul);
  llvm::Value *B1 = Builder.CreateXor(Builder.CreateLShr(B0, K47), B0);
  return Builder.CreateMul(B1, KMul);
}
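
// The computation above mirrors hash_16_bytes (itself modeled on CityHash's
// Hash128to64 finalization):
//   a = (lo ^ hi) * kMul;  a ^= (a >> 47);
//   b = (hi ^ a)  * kMul;  b ^= (b >> 47);
//   return b * kMul;
// The ubsan runtime performs the same hash when validating and filling the
// vptr type cache used in EmitTypeCheck below, so the two must agree.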

void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
                                    llvm::Value *Address,
                                    QualType Ty, CharUnits Alignment) {
  if (!SanitizePerformTypeCheck)
    return;

  // Don't check pointers outside the default address space. The null check
  // isn't correct, the object-size check isn't supported by LLVM, and we can't
  // communicate the addresses to the runtime handler for the vptr check.
  if (Address->getType()->getPointerAddressSpace())
    return;

  llvm::Value *Cond = 0;

  if (getLangOpts().SanitizeNull) {
    // The glvalue must not be an empty glvalue.
    Cond = Builder.CreateICmpNE(
        Address, llvm::Constant::getNullValue(Address->getType()));
  }

  if (getLangOpts().SanitizeObjectSize && !Ty->isIncompleteType()) {
    uint64_t Size = getContext().getTypeSizeInChars(Ty).getQuantity();

    // The glvalue must refer to a large enough storage region.
    // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
    //        to check this.
    llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, IntPtrTy);
    llvm::Value *Min = Builder.getFalse();
    llvm::Value *CastAddr = Builder.CreateBitCast(Address, Int8PtrTy);
    llvm::Value *LargeEnough =
        Builder.CreateICmpUGE(Builder.CreateCall2(F, CastAddr, Min),
                              llvm::ConstantInt::get(IntPtrTy, Size));
    Cond = Cond ? Builder.CreateAnd(Cond, LargeEnough) : LargeEnough;
  }

  uint64_t AlignVal = 0;

  if (getLangOpts().SanitizeAlignment) {
    AlignVal = Alignment.getQuantity();
    if (!Ty->isIncompleteType() && !AlignVal)
      AlignVal = getContext().getTypeAlignInChars(Ty).getQuantity();

    // The glvalue must be suitably aligned.
    if (AlignVal) {
      llvm::Value *Align =
          Builder.CreateAnd(Builder.CreatePtrToInt(Address, IntPtrTy),
                            llvm::ConstantInt::get(IntPtrTy, AlignVal - 1));
      llvm::Value *Aligned =
        Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
      Cond = Cond ? Builder.CreateAnd(Cond, Aligned) : Aligned;
    }
  }

  if (Cond) {
    llvm::Constant *StaticData[] = {
      EmitCheckSourceLocation(Loc),
      EmitCheckTypeDescriptor(Ty),
      llvm::ConstantInt::get(SizeTy, AlignVal),
      llvm::ConstantInt::get(Int8Ty, TCK)
    };
    EmitCheck(Cond, "type_mismatch", StaticData, Address, CRK_Recoverable);
  }

  // If possible, check that the vptr indicates that there is a subobject of
  // type Ty at offset zero within this object.
  CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  if (getLangOpts().SanitizeVptr && TCK != TCK_ConstructorCall &&
      RD && RD->hasDefinition() && RD->isDynamicClass()) {
    // Compute a hash of the mangled name of the type.
    //
    // FIXME: This is not guaranteed to be deterministic! Move to a
    //        fingerprinting mechanism once LLVM provides one. For the time
    //        being the implementation happens to be deterministic.
    llvm::SmallString<64> MangledName;
    llvm::raw_svector_ostream Out(MangledName);
    CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
                                                     Out);
    llvm::hash_code TypeHash = hash_value(Out.str());

    // Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
    llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash);
    llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
    llvm::Value *VPtrAddr = Builder.CreateBitCast(Address, VPtrTy);
    llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr);
    llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty);

    llvm::Value *Hash = emitHash16Bytes(Builder, Low, High);
    Hash = Builder.CreateTrunc(Hash, IntPtrTy);

    // Look the hash up in our cache.
    const int CacheSize = 128;
    llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
    llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
                                                   "__ubsan_vptr_type_cache");
    llvm::Value *Slot = Builder.CreateAnd(Hash,
                                          llvm::ConstantInt::get(IntPtrTy,
                                                                 CacheSize-1));
    llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
    llvm::Value *CacheVal =
      Builder.CreateLoad(Builder.CreateInBoundsGEP(Cache, Indices));

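    // Sketch of the IR emitted for the cache probe up to this point:
    //   %vptr  = load the vtable pointer from the object address
    //   %hash  = trunc(hash_16_bytes(TypeHash, zext %vptr))
    //   %slot  = and %hash, CacheSize-1
    //   %cache = load @__ubsan_vptr_type_cache[%slot]
    // The check below fires only when %cache != %hash.
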
    // If the hash isn't in the cache, call a runtime handler to perform the
    // hard work of checking whether the vptr is for an object of the right
    // type. This will either fill in the cache and return, or produce a
    // diagnostic.
    llvm::Constant *StaticData[] = {
      EmitCheckSourceLocation(Loc),
      EmitCheckTypeDescriptor(Ty),
      CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
      llvm::ConstantInt::get(Int8Ty, TCK)
    };
    llvm::Value *DynamicData[] = { Address, Hash };
    EmitCheck(Builder.CreateICmpEQ(CacheVal, Hash),
              "dynamic_type_cache_miss", StaticData, DynamicData,
              CRK_AlwaysRecoverable);
  }
}

CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                         bool isInc, bool isPre) {
  ComplexPairTy InVal = LoadComplexFromAddr(LV.getAddress(),
                                            LV.isVolatileQualified());

  llvm::Value *NextVal;
  if (isa<llvm::IntegerType>(InVal.first->getType())) {
    uint64_t AmountVal = isInc ? 1 : -1;
    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  } else {
    QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
    llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
    if (!isInc)
      FVal.changeSign();
    NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  }

  ComplexPairTy IncVal(NextVal, InVal.second);

  // Store the updated result through the lvalue.
  StoreComplexToAddr(IncVal, LV.getAddress(), LV.isVolatileQualified());

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? IncVal : InVal;
}

//===----------------------------------------------------------------------===//
//                         LValue Expression Emission
//===----------------------------------------------------------------------===//

RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType())
    return RValue::get(0);

  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    llvm::Type *EltTy = ConvertType(CTy->getElementType());
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  }

  // If this is a use of an undefined aggregate type, the aggregate must have an
  // identifiable address.  Just because the contents of the value are undefined
  // doesn't mean that the address can't be taken and compared.
  if (hasAggregateLLVMType(Ty)) {
    llvm::Value *DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
    return RValue::getAggregate(DestPtr);
  }

  return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
}

RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}

LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
  return MakeAddrLValue(llvm::UndefValue::get(Ty), E->getType());
}

LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
  LValue LV = EmitLValue(E);
  if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
    EmitTypeCheck(TCK, E->getExprLoc(), LV.getAddress(),
                  E->getType(), LV.getAlignment());
  return LV;
}

/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield reference.
/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
/// an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of the
/// LLVM value is known: For example, it may not be a pointer to an integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed size,
/// this method guarantees that the returned pointer type will point to an LLVM
/// type of the same size of the lvalue's type.  If the lvalue has a variable
/// length type, this is not possible.
///
LValue CodeGenFunction::EmitLValue(const Expr *E) {
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::ObjCPropertyRefExprClass:
    llvm_unreachable("cannot emit a property reference directly");

  case Expr::ObjCSelectorExprClass:
    return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
  case Expr::ObjCIsaExprClass:
    return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CompoundAssignOperatorClass:
    if (!E->getType()->isAnyComplexType())
      return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
    return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
  case Expr::UserDefinedLiteralClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ParenExprClass:
    return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
  case Expr::GenericSelectionExprClass:
    return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
  case Expr::PseudoObjectExprClass:
    return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
  case Expr::InitListExprClass:
    return EmitInitListLValue(cast<InitListExpr>(E));
  case Expr::CXXTemporaryObjectExprClass:
  case Expr::CXXConstructExprClass:
    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
  case Expr::CXXBindTemporaryExprClass:
    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
  case Expr::CXXUuidofExprClass:
    return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
  case Expr::LambdaExprClass:
    return EmitLambdaLValue(cast<LambdaExpr>(E));

  case Expr::ExprWithCleanupsClass: {
    const ExprWithCleanups *cleanups = cast<ExprWithCleanups>(E);
    enterFullExpression(cleanups);
    RunCleanupsScope Scope(*this);
    return EmitLValue(cleanups->getSubExpr());
  }

  case Expr::CXXScalarValueInitExprClass:
    return EmitNullInitializationLValue(cast<CXXScalarValueInitExpr>(E));
  case Expr::CXXDefaultArgExprClass:
    return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
  case Expr::CXXTypeidExprClass:
    return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));

  case Expr::ObjCMessageExprClass:
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
  case Expr::ObjCIvarRefExprClass:
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
  case Expr::StmtExprClass:
    return EmitStmtExprLValue(cast<StmtExpr>(E));
  case Expr::UnaryOperatorClass:
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
  case Expr::ArraySubscriptExprClass:
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
  case Expr::ExtVectorElementExprClass:
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
  case Expr::MemberExprClass:
    return EmitMemberExpr(cast<MemberExpr>(E));
  case Expr::CompoundLiteralExprClass:
    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
  case Expr::BinaryConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
  case Expr::OpaqueValueExprClass:
    return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
  case Expr::SubstNonTypeTemplateParmExprClass:
    return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
  case Expr::ObjCBridgedCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));

  case Expr::MaterializeTemporaryExprClass:
    return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
  }
}

/// Given an object of the given canonical type, can we safely copy a
/// value out of it based on its initializer?
static bool isConstantEmittableObjectType(QualType type) {
  assert(type.isCanonical());
  assert(!type->isReferenceType());

  // Must be const-qualified but non-volatile.
  Qualifiers qs = type.getLocalQualifiers();
  if (!qs.hasConst() || qs.hasVolatile()) return false;

  // Otherwise, all object types satisfy this except C++ classes with
  // mutable subobjects or non-trivial copy/destroy behavior.
  if (const RecordType *RT = dyn_cast<RecordType>(type))
    if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      if (RD->hasMutableFields() || !RD->isTrivial())
        return false;

  return true;
}

/// Can we constant-emit a load of a reference to a variable of the
/// given type?  This is different from predicates like
/// Decl::isUsableInConstantExpressions because we do want it to apply
/// in situations that don't necessarily satisfy the language's rules
/// for this (e.g. C++'s ODR-use rules).  For example, we want to be
/// able to do this with const float variables even if those variables
/// aren't marked 'constexpr'.
enum ConstantEmissionKind {
  CEK_None,
  CEK_AsReferenceOnly,
  CEK_AsValueOrReference,
  CEK_AsValueOnly
};
static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
  type = type.getCanonicalType();
  if (const ReferenceType *ref = dyn_cast<ReferenceType>(type)) {
    if (isConstantEmittableObjectType(ref->getPointeeType()))
      return CEK_AsValueOrReference;
    return CEK_AsReferenceOnly;
  }
  if (isConstantEmittableObjectType(type))
    return CEK_AsValueOnly;
  return CEK_None;
}
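
// For example: 'const int x' is CEK_AsValueOnly; 'const int &r' has a
// constant-emittable pointee and so is CEK_AsValueOrReference; a reference
// to a volatile-qualified type is CEK_AsReferenceOnly; plain 'int' is
// CEK_None.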

/// Try to emit a reference to the given value without producing it as
/// an l-value.  This is actually more than an optimization: we can't
/// produce an l-value for variables that we never actually captured
/// in a block or lambda, which means const int variables or constexpr
/// literals or similar.
CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
  ValueDecl *value = refExpr->getDecl();

  // The value needs to be an enum constant or a constant variable.
  ConstantEmissionKind CEK;
  if (isa<ParmVarDecl>(value)) {
    CEK = CEK_None;
  } else if (VarDecl *var = dyn_cast<VarDecl>(value)) {
    CEK = checkVarTypeForConstantEmission(var->getType());
  } else if (isa<EnumConstantDecl>(value)) {
    CEK = CEK_AsValueOnly;
  } else {
    CEK = CEK_None;
  }
  if (CEK == CEK_None) return ConstantEmission();

  Expr::EvalResult result;
  bool resultIsReference;
  QualType resultType;

  // It's best to evaluate all the way as an r-value if that's permitted.
  if (CEK != CEK_AsReferenceOnly &&
      refExpr->EvaluateAsRValue(result, getContext())) {
    resultIsReference = false;
    resultType = refExpr->getType();

  // Otherwise, try to evaluate as an l-value.
  } else if (CEK != CEK_AsValueOnly &&
             refExpr->EvaluateAsLValue(result, getContext())) {
    resultIsReference = true;
    resultType = value->getType();

  // Failure.
  } else {
    return ConstantEmission();
  }

  // In any case, if the initializer has side-effects, abandon ship.
  if (result.HasSideEffects)
    return ConstantEmission();

  // Emit as a constant.
  llvm::Constant *C = CGM.EmitConstantValue(result.Val, resultType, this);

  // Make sure we emit a debug reference to the global variable.
  // This should probably fire even for
  if (isa<VarDecl>(value)) {
    if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
      EmitDeclRefExprDbgValue(refExpr, C);
  } else {
    assert(isa<EnumConstantDecl>(value));
    EmitDeclRefExprDbgValue(refExpr, C);
  }

  // If we emitted a reference constant, we need to dereference that.
  if (resultIsReference)
    return ConstantEmission::forReference(C);

  return ConstantEmission::forValue(C);
}

llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue) {
  return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
                          lvalue.getAlignment().getQuantity(),
                          lvalue.getType(), lvalue.getTBAAInfo());
}

static bool hasBooleanRepresentation(QualType Ty) {
  if (Ty->isBooleanType())
    return true;

  if (const EnumType *ET = Ty->getAs<EnumType>())
    return ET->getDecl()->getIntegerType()->isBooleanType();

  if (const AtomicType *AT = Ty->getAs<AtomicType>())
    return hasBooleanRepresentation(AT->getValueType());

  return false;
}

static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
                            llvm::APInt &Min, llvm::APInt &End,
                            bool StrictEnums) {
  const EnumType *ET = Ty->getAs<EnumType>();
  bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
                                ET && !ET->getDecl()->isFixed();
  bool IsBool = hasBooleanRepresentation(Ty);
  if (!IsBool && !IsRegularCPlusPlusEnum)
    return false;

  if (IsBool) {
    Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
    End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
  } else {
    const EnumDecl *ED = ET->getDecl();
    llvm::Type *LTy = CGF.ConvertTypeForMem(ED->getIntegerType());
    unsigned Bitwidth = LTy->getScalarSizeInBits();
    unsigned NumNegativeBits = ED->getNumNegativeBits();
    unsigned NumPositiveBits = ED->getNumPositiveBits();

    if (NumNegativeBits) {
      unsigned NumBits = std::max(NumNegativeBits, NumPositiveBits + 1);
      assert(NumBits <= Bitwidth);
      End = llvm::APInt(Bitwidth, 1) << (NumBits - 1);
      Min = -End;
    } else {
      assert(NumPositiveBits <= Bitwidth);
      End = llvm::APInt(Bitwidth, 1) << NumPositiveBits;
      Min = llvm::APInt(Bitwidth, 0);
    }
  }
  return true;
}
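
// Worked examples: 'enum E { A = 0, B = 5 }' needs 3 positive bits and no
// negative bits, so its range is [0, 8); 'enum F { X = -1 }' needs one
// negative bit, giving [-1, 1); bool always gets [0, 2).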

llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
  llvm::APInt Min, End;
  if (!getRangeForType(*this, Ty, Min, End,
                       CGM.getCodeGenOpts().StrictEnums))
    return 0;

  llvm::MDBuilder MDHelper(getLLVMContext());
  return MDHelper.createRange(Min, End);
}

llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
                                               unsigned Alignment, QualType Ty,
                                               llvm::MDNode *TBAAInfo) {

  // For better performance, handle vector loads differently.
  if (Ty->isVectorType()) {
    llvm::Value *V;
    const llvm::Type *EltTy =
      cast<llvm::PointerType>(Addr->getType())->getElementType();

    const llvm::VectorType *VTy = cast<llvm::VectorType>(EltTy);

    // Handle vectors of size 3 like size 4, for better performance.
    if (VTy->getNumElements() == 3) {

      // Bitcast to vec4 type.
      llvm::VectorType *vec4Ty = llvm::VectorType::get(VTy->getElementType(),
                                                       4);
      llvm::PointerType *ptVec4Ty =
        llvm::PointerType::get(vec4Ty,
                               (cast<llvm::PointerType>(
                                        Addr->getType()))->getAddressSpace());
      llvm::Value *Cast = Builder.CreateBitCast(Addr, ptVec4Ty,
                                                "castToVec4");
      // Now load value.
      llvm::Value *LoadVal = Builder.CreateLoad(Cast, Volatile, "loadVec4");

      // Shuffle vector to get vec3.
      llvm::Constant *Mask[] = {
        llvm::ConstantInt::get(llvm::Type::getInt32Ty(getLLVMContext()), 0),
        llvm::ConstantInt::get(llvm::Type::getInt32Ty(getLLVMContext()), 1),
        llvm::ConstantInt::get(llvm::Type::getInt32Ty(getLLVMContext()), 2)
      };

      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      V = Builder.CreateShuffleVector(LoadVal,
                                      llvm::UndefValue::get(vec4Ty),
                                      MaskV, "extractVec");
      return EmitFromMemory(V, Ty);
    }
  }

  llvm::LoadInst *Load = Builder.CreateLoad(Addr);
  if (Volatile)
    Load->setVolatile(true);
  if (Alignment)
    Load->setAlignment(Alignment);
  if (TBAAInfo)
    CGM.DecorateInstruction(Load, TBAAInfo);
  // If this is an atomic type, all normal reads must be atomic
  if (Ty->isAtomicType())
    Load->setAtomic(llvm::SequentiallyConsistent);

  if ((getLangOpts().SanitizeBool && hasBooleanRepresentation(Ty)) ||
      (getLangOpts().SanitizeEnum && Ty->getAs<EnumType>())) {
    llvm::APInt Min, End;
    if (getRangeForType(*this, Ty, Min, End, true)) {
      --End;
      llvm::Value *Check;
      if (!Min)
        Check = Builder.CreateICmpULE(
          Load, llvm::ConstantInt::get(getLLVMContext(), End));
      else {
        llvm::Value *Upper = Builder.CreateICmpSLE(
          Load, llvm::ConstantInt::get(getLLVMContext(), End));
        llvm::Value *Lower = Builder.CreateICmpSGE(
          Load, llvm::ConstantInt::get(getLLVMContext(), Min));
        Check = Builder.CreateAnd(Upper, Lower);
      }
      // FIXME: Provide a SourceLocation.
      EmitCheck(Check, "load_invalid_value", EmitCheckTypeDescriptor(Ty),
                EmitCheckValue(Load), CRK_Recoverable);
    }
  } else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
    if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
      Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);

  return EmitFromMemory(Load, Ty);
}
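
// Example of the range check above: loading a bool under -fsanitize=bool
// has Min = 0 and End = 1 after the decrement, so the guard emitted is
// 'icmp ule i8 %val, 1'; out-of-range values reach the recoverable
// "load_invalid_value" runtime handler.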

llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
  if (hasBooleanRepresentation(Ty)) {
    // This should really always be an i1, but sometimes it's already
    // an i8, and it's awkward to track those cases down.
    if (Value->getType()->isIntegerTy(1))
      return Builder.CreateZExt(Value, ConvertTypeForMem(Ty), "frombool");
    assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
           "wrong value rep of bool");
  }

  return Value;
}

llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
  if (hasBooleanRepresentation(Ty)) {
    assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
           "wrong value rep of bool");
    return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
  }

  return Value;
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
                                        bool Volatile, unsigned Alignment,
                                        QualType Ty,
                                        llvm::MDNode *TBAAInfo,
                                        bool isInit) {

  // Handle vectors differently to get better performance.
  if (Ty->isVectorType()) {
    llvm::Type *SrcTy = Value->getType();
    llvm::VectorType *VecTy = cast<llvm::VectorType>(SrcTy);
    // Handle vec3 special.
    if (VecTy->getNumElements() == 3) {
      llvm::LLVMContext &VMContext = getLLVMContext();

      // Our source is a vec3, do a shuffle vector to make it a vec4.
      llvm::SmallVector<llvm::Constant*, 4> Mask;
      Mask.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                                            0));
      Mask.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                                            1));
      Mask.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                                            2));
      Mask.push_back(llvm::UndefValue::get(llvm::Type::getInt32Ty(VMContext)));

      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      Value = Builder.CreateShuffleVector(Value,
                                          llvm::UndefValue::get(VecTy),
                                          MaskV, "extractVec");
      SrcTy = llvm::VectorType::get(VecTy->getElementType(), 4);
    }
    llvm::PointerType *DstPtr = cast<llvm::PointerType>(Addr->getType());
    if (DstPtr->getElementType() != SrcTy) {
      llvm::Type *MemTy =
        llvm::PointerType::get(SrcTy, DstPtr->getAddressSpace());
      Addr = Builder.CreateBitCast(Addr, MemTy, "storetmp");
    }
  }

  Value = EmitToMemory(Value, Ty);

  llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
  if (Alignment)
    Store->setAlignment(Alignment);
  if (TBAAInfo)
    CGM.DecorateInstruction(Store, TBAAInfo);
  if (!isInit && Ty->isAtomicType())
    Store->setAtomic(llvm::SequentiallyConsistent);
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
    bool isInit) {
  EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
                    lvalue.getAlignment().getQuantity(), lvalue.getType(),
                    lvalue.getTBAAInfo(), isInit);
}

/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV) {
  if (LV.isObjCWeak()) {
    // load of a __weak object.
    llvm::Value *AddrWeakObj = LV.getAddress();
    return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                             AddrWeakObj));
  }
  if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
    llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress());
    Object = EmitObjCConsumeObject(LV.getType(), Object);
    return RValue::get(Object);
  }

  if (LV.isSimple()) {
    assert(!LV.getType()->isFunctionType());

    // Everything needs a load.
    return RValue::get(EmitLoadOfScalar(LV));
  }

  if (LV.isVectorElt()) {
    llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddr(),
                                              LV.isVolatileQualified());
    Load->setAlignment(LV.getAlignment().getQuantity());
    return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
                                                    "vecext"));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
  if (LV.isExtVectorElt())
    return EmitLoadOfExtVectorElementLValue(LV);

  assert(LV.isBitField() && "Unknown LValue type!");
  return EmitLoadOfBitfieldLValue(LV);
}

RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
  const CGBitFieldInfo &Info = LV.getBitFieldInfo();

  // Get the output type.
  llvm::Type *ResLTy = ConvertType(LV.getType());

  llvm::Value *Ptr = LV.getBitFieldAddr();
  llvm::Value *Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(),
                                        "bf.load");
  cast<llvm::LoadInst>(Val)->setAlignment(Info.StorageAlignment);

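  // Worked example: for 'int f : 3' at Offset 2 in a 32-bit storage unit, the
  // signed path computes HighBits = 32 - 2 - 3 = 27, shifting left by 27 and
  // arithmetic-shifting right by 29 to sign-extend the field; the unsigned
  // path shifts right by 2 and masks with 0x7.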
  if (Info.IsSigned) {
    assert((Info.Offset + Info.Size) <= Info.StorageSize);
    unsigned HighBits = Info.StorageSize - Info.Offset - Info.Size;
    if (HighBits)
      Val = Builder.CreateShl(Val, HighBits, "bf.shl");
    if (Info.Offset + HighBits)
      Val = Builder.CreateAShr(Val, Info.Offset + HighBits, "bf.ashr");
  } else {
    if (Info.Offset)
      Val = Builder.CreateLShr(Val, Info.Offset, "bf.lshr");
    if (Info.Offset + Info.Size < Info.StorageSize)
      Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(Info.StorageSize,
                                                              Info.Size),
                              "bf.clear");
  }
  Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");

  return RValue::get(Val);
}

// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
  llvm::LoadInst *Load = Builder.CreateLoad(LV.getExtVectorAddr(),
                                            LV.isVolatileQualified());
  Load->setAlignment(LV.getAlignment().getQuantity());
  llvm::Value *Vec = Load;

  const llvm::Constant *Elts = LV.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be extracting
  // a single element.  Just codegen as an extractelement.
  const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
  if (!ExprVT) {
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
    return RValue::get(Builder.CreateExtractElement(Vec, Elt));
  }

  // Always use shuffle vector to try to retain the original program structure
  unsigned NumResultElts = ExprVT->getNumElements();

  SmallVector<llvm::Constant*, 4> Mask;
  for (unsigned i = 0; i != NumResultElts; ++i)
    Mask.push_back(Builder.getInt32(getAccessedFieldNo(i, Elts)));

  llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
  Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
                                    MaskV);
  return RValue::get(Vec);
}

/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit) {
  if (!Dst.isSimple()) {
    if (Dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element.
      llvm::LoadInst *Load = Builder.CreateLoad(Dst.getVectorAddr(),
                                                Dst.isVolatileQualified());
      Load->setAlignment(Dst.getAlignment().getQuantity());
      llvm::Value *Vec = Load;
      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
                                        Dst.getVectorIdx(), "vecins");
      llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getVectorAddr(),
                                                   Dst.isVolatileQualified());
      Store->setAlignment(Dst.getAlignment().getQuantity());
      return;
    }

    // If this is an update of extended vector elements, insert them as
    // appropriate.
    if (Dst.isExtVectorElt())
      return EmitStoreThroughExtVectorComponentLValue(Src, Dst);

    assert(Dst.isBitField() && "Unknown LValue type");
    return EmitStoreThroughBitfieldLValue(Src, Dst);
  }

  // There's special magic for assigning into an ARC-qualified l-value.
  if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
      llvm_unreachable("present but none");

    case Qualifiers::OCL_ExplicitNone:
      // nothing special
      break;

    case Qualifiers::OCL_Strong:
      EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Weak:
      EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Autoreleasing:
      Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
                                                     Src.getScalarVal()));
      // fall into the normal path
      break;
    }
  }

  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // Assignment into a __weak object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
    return;
  }

  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // Assignment into a __strong object.
1304    llvm::Value *LvalueDst = Dst.getAddress();
1305    llvm::Value *src = Src.getScalarVal();
1306    if (Dst.isObjCIvar()) {
1307      assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
1308      llvm::Type *ResultType = ConvertType(getContext().LongTy);
1309      llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
1310      llvm::Value *dst = RHS;
1311      RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
1312      llvm::Value *LHS =
1313        Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
1314      llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
1315      CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
1316                                              BytesBetween);
1317    } else if (Dst.isGlobalObjCRef()) {
1318      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
1319                                                Dst.isThreadLocalRef());
1320    } else {
1321      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
1322    }
1323    return;
1324  }
1325
1326  assert(Src.isScalar() && "Can't emit an agg store with this method");
1327  EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
1328}
1329
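/// EmitStoreThroughBitfieldLValue - As a sketch (hypothetical source, typical
/// layout), given
/// \code
///   struct S { unsigned a : 3; unsigned b : 5; } s;
///   s.b = v;
/// \endcode
/// the code below loads the shared storage unit, masks the truncated source
/// into b's bit range, clears b's old bits, ORs the two, and stores back.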
1330void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
1331                                                     llvm::Value **Result) {
1332  const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
1333  llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
1334  llvm::Value *Ptr = Dst.getBitFieldAddr();
1335
1336  // Get the source value, truncated to the width of the bit-field.
1337  llvm::Value *SrcVal = Src.getScalarVal();
1338
1339  // Cast the source to the storage type and shift it into place.
1340  SrcVal = Builder.CreateIntCast(SrcVal,
1341                                 Ptr->getType()->getPointerElementType(),
1342                                 /*IsSigned=*/false);
1343  llvm::Value *MaskedVal = SrcVal;
1344
1345  // See if there are other bits in the bitfield's storage we'll need to load
1346  // and mask together with the source before storing.
1347  if (Info.StorageSize != Info.Size) {
1348    assert(Info.StorageSize > Info.Size && "Invalid bitfield size.");
1349    llvm::Value *Val = Builder.CreateLoad(Ptr, Dst.isVolatileQualified(),
1350                                          "bf.load");
1351    cast<llvm::LoadInst>(Val)->setAlignment(Info.StorageAlignment);
1352
1353    // Mask the source value as needed.
1354    if (!hasBooleanRepresentation(Dst.getType()))
1355      SrcVal = Builder.CreateAnd(SrcVal,
1356                                 llvm::APInt::getLowBitsSet(Info.StorageSize,
1357                                                            Info.Size),
1358                                 "bf.value");
1359    MaskedVal = SrcVal;
1360    if (Info.Offset)
1361      SrcVal = Builder.CreateShl(SrcVal, Info.Offset, "bf.shl");
1362
1363    // Mask out the original value.
1364    Val = Builder.CreateAnd(Val,
1365                            ~llvm::APInt::getBitsSet(Info.StorageSize,
1366                                                     Info.Offset,
1367                                                     Info.Offset + Info.Size),
1368                            "bf.clear");
1369
1370    // Or together the unchanged values and the source value.
1371    SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
1372  } else {
1373    assert(Info.Offset == 0);
1374  }
1375
1376  // Write the new value back out.
1377  llvm::StoreInst *Store = Builder.CreateStore(SrcVal, Ptr,
1378                                               Dst.isVolatileQualified());
1379  Store->setAlignment(Info.StorageAlignment);
1380
1381  // Return the new value of the bit-field, if requested.
1382  if (Result) {
1383    llvm::Value *ResultVal = MaskedVal;
1384
1385    // Sign extend the value if needed.
1386    if (Info.IsSigned) {
1387      assert(Info.Size <= Info.StorageSize);
1388      unsigned HighBits = Info.StorageSize - Info.Size;
1389      if (HighBits) {
1390        ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
1391        ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
1392      }
1393    }
1394
1395    ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
1396                                      "bf.result.cast");
1397    *Result = ResultVal;
1398  }
1399}
1400
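/// EmitStoreThroughExtVectorComponentLValue - For example (hypothetical
/// source, assuming 'float4'/'float2' ext-vector typedefs), a swizzled store
/// \code
///   float4 v; float2 w; v.yx = w;
/// \endcode
/// is lowered below to a load of v, shuffles that place w's elements at the
/// accessed positions of v, and a store of the merged vector.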
1401void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
1402                                                               LValue Dst) {
1403  // This access turns into a read/modify/write of the vector.  Load the input
1404  // value now.
1405  llvm::LoadInst *Load = Builder.CreateLoad(Dst.getExtVectorAddr(),
1406                                            Dst.isVolatileQualified());
1407  Load->setAlignment(Dst.getAlignment().getQuantity());
1408  llvm::Value *Vec = Load;
1409  const llvm::Constant *Elts = Dst.getExtVectorElts();
1410
1411  llvm::Value *SrcVal = Src.getScalarVal();
1412
1413  if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
1414    unsigned NumSrcElts = VTy->getNumElements();
1415    unsigned NumDstElts =
1416       cast<llvm::VectorType>(Vec->getType())->getNumElements();
1417    if (NumDstElts == NumSrcElts) {
1418      // Use a shuffle vector when the src and destination have the same
1419      // number of elements; invert the access mask, since the shuffle is
1420      // applied to the side that gets stored.
1421      SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
1422      for (unsigned i = 0; i != NumSrcElts; ++i)
1423        Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i);
1424
1425      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
1426      Vec = Builder.CreateShuffleVector(SrcVal,
1427                                        llvm::UndefValue::get(Vec->getType()),
1428                                        MaskV);
1429    } else if (NumDstElts > NumSrcElts) {
1430      // Extend the source vector to the same length and then shuffle it
1431      // into the destination.
1432      // FIXME: since we're shuffling with undef, can we just use the indices
1433      //        into that?  This could be simpler.
1434      SmallVector<llvm::Constant*, 4> ExtMask;
1435      for (unsigned i = 0; i != NumSrcElts; ++i)
1436        ExtMask.push_back(Builder.getInt32(i));
1437      ExtMask.resize(NumDstElts, llvm::UndefValue::get(Int32Ty));
1438      llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask);
1439      llvm::Value *ExtSrcVal =
1440        Builder.CreateShuffleVector(SrcVal,
1441                                    llvm::UndefValue::get(SrcVal->getType()),
1442                                    ExtMaskV);
1443      // Build an identity mask for the destination vector.
1444      SmallVector<llvm::Constant*, 4> Mask;
1445      for (unsigned i = 0; i != NumDstElts; ++i)
1446        Mask.push_back(Builder.getInt32(i));
1447
1448      // Modify the entries that get new elements shuffled in from the source.
1449      for (unsigned i = 0; i != NumSrcElts; ++i)
1450        Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i+NumDstElts);
1451      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
1452      Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV);
1453    } else {
1454      // We should never shorten the vector
1455      llvm_unreachable("unexpected shorten vector length");
1456    }
1457  } else {
1458    // If the Src is a scalar (not a vector) it must be updating one element.
1459    unsigned InIdx = getAccessedFieldNo(0, Elts);
1460    llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
1461    Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
1462  }
1463
1464  llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getExtVectorAddr(),
1465                                               Dst.isVolatileQualified());
1466  Store->setAlignment(Dst.getAlignment().getQuantity());
1467}
1468
1469// setObjCGCLValueClass - Sets the class of the lvalue for the purpose of
1470// generating the write-barrier API. It is currently a global, ivar,
1471// or neither.
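//
// For example (hypothetical GC-mode source), in 'id g; g = x;' at file scope
// the lvalue is classified as a global so that the global-assign entry point
// is used, while 'self->ivar = x;' is classified as an ivar.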
1472static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
1473                                 LValue &LV,
1474                                 bool IsMemberAccess=false) {
1475  if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
1476    return;
1477
1478  if (isa<ObjCIvarRefExpr>(E)) {
1479    QualType ExpTy = E->getType();
1480    if (IsMemberAccess && ExpTy->isPointerType()) {
1481      // If the ivar is a structure pointer, assigning to a field of
1482      // this struct follows gcc's behavior and conservatively makes it
1483      // a non-ivar write-barrier.
1484      ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
1485      if (ExpTy->isRecordType()) {
1486        LV.setObjCIvar(false);
1487        return;
1488      }
1489    }
1490    LV.setObjCIvar(true);
1491    ObjCIvarRefExpr *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr*>(E));
1492    LV.setBaseIvarExp(Exp->getBase());
1493    LV.setObjCArray(E->getType()->isArrayType());
1494    return;
1495  }
1496
1497  if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
1498    if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
1499      if (VD->hasGlobalStorage()) {
1500        LV.setGlobalObjCRef(true);
1501        LV.setThreadLocalRef(VD->isThreadSpecified());
1502      }
1503    }
1504    LV.setObjCArray(E->getType()->isArrayType());
1505    return;
1506  }
1507
1508  if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E)) {
1509    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1510    return;
1511  }
1512
1513  if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
1514    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1515    if (LV.isObjCIvar()) {
1516      // If cast is to a structure pointer, follow gcc's behavior and make it
1517      // a non-ivar write-barrier.
1518      QualType ExpTy = E->getType();
1519      if (ExpTy->isPointerType())
1520        ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
1521      if (ExpTy->isRecordType())
1522        LV.setObjCIvar(false);
1523    }
1524    return;
1525  }
1526
1527  if (const GenericSelectionExpr *Exp = dyn_cast<GenericSelectionExpr>(E)) {
1528    setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
1529    return;
1530  }
1531
1532  if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E)) {
1533    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1534    return;
1535  }
1536
1537  if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E)) {
1538    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1539    return;
1540  }
1541
1542  if (const ObjCBridgedCastExpr *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
1543    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1544    return;
1545  }
1546
1547  if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
1548    setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
1549    if (LV.isObjCIvar() && !LV.isObjCArray())
1550      // Using array syntax to assign to what an ivar points to is not the
1551      // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
1552      LV.setObjCIvar(false);
1553    else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
1554      // Using array syntax to assign to what a global points to is not the
1555      // same as assigning to the global itself. {id *G;} G[i] = 0;
1556      LV.setGlobalObjCRef(false);
1557    return;
1558  }
1559
1560  if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) {
1561    setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
1562    // We don't know if the member is an 'ivar', but this flag is looked at
1563    // only in the context of LV.isObjCIvar().
1564    LV.setObjCArray(E->getType()->isArrayType());
1565    return;
1566  }
1567}
1568
1569static llvm::Value *
1570EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
1571                                llvm::Value *V, llvm::Type *IRType,
1572                                StringRef Name = StringRef()) {
1573  unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
1574  return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
1575}
1576
1577static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
1578                                      const Expr *E, const VarDecl *VD) {
1579  assert((VD->hasExternalStorage() || VD->isFileVarDecl()) &&
1580         "Var decl must have external storage or be a file var decl!");
1581
1582  llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
1583  llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
1584  V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
1585  CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
1586  QualType T = E->getType();
1587  LValue LV;
1588  if (VD->getType()->isReferenceType()) {
1589    llvm::LoadInst *LI = CGF.Builder.CreateLoad(V);
1590    LI->setAlignment(Alignment.getQuantity());
1591    V = LI;
1592    LV = CGF.MakeNaturalAlignAddrLValue(V, T);
1593  } else {
1594    LV = CGF.MakeAddrLValue(V, E->getType(), Alignment);
1595  }
1596  setObjCGCLValueClass(CGF.getContext(), E, LV);
1597  return LV;
1598}
1599
1600static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
1601                                     const Expr *E, const FunctionDecl *FD) {
1602  llvm::Value *V = CGF.CGM.GetAddrOfFunction(FD);
1603  if (!FD->hasPrototype()) {
1604    if (const FunctionProtoType *Proto =
1605            FD->getType()->getAs<FunctionProtoType>()) {
1606      // Ugly case: for a K&R-style definition, the type of the definition
1607      // isn't the same as the type of a use.  Correct for this with a
1608      // bitcast.
1609      QualType NoProtoType =
1610          CGF.getContext().getFunctionNoProtoType(Proto->getResultType());
1611      NoProtoType = CGF.getContext().getPointerType(NoProtoType);
1612      V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType));
1613    }
1614  }
1615  CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
1616  return CGF.MakeAddrLValue(V, E->getType(), Alignment);
1617}
1618
1619LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
1620  const NamedDecl *ND = E->getDecl();
1621  CharUnits Alignment = getContext().getDeclAlign(ND);
1622  QualType T = E->getType();
1623
1624  // A DeclRefExpr for a reference initialized by a constant expression can
1625  // appear without being odr-used. Directly emit the constant initializer.
1626  if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
1627    const Expr *Init = VD->getAnyInitializer(VD);
1628    if (Init && !isa<ParmVarDecl>(VD) && VD->getType()->isReferenceType() &&
1629        VD->isUsableInConstantExpressions(getContext()) &&
1630        VD->checkInitIsICE()) {
1631      llvm::Constant *Val =
1632        CGM.EmitConstantValue(*VD->evaluateValue(), VD->getType(), this);
1633      assert(Val && "failed to emit reference constant expression");
1634      // FIXME: Eventually we will want to emit vector element references.
1635      return MakeAddrLValue(Val, T, Alignment);
1636    }
1637  }
1638
1639  // FIXME: We should be able to assert this for FunctionDecls as well!
1640  // FIXME: We should be able to assert this for all DeclRefExprs, not just
1641  // those with a valid source location.
1642  assert((ND->isUsed(false) || !isa<VarDecl>(ND) ||
1643          !E->getLocation().isValid()) &&
1644         "Should not use decl without marking it used!");
1645
1646  if (ND->hasAttr<WeakRefAttr>()) {
1647    const ValueDecl *VD = cast<ValueDecl>(ND);
1648    llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD);
1649    return MakeAddrLValue(Aliasee, T, Alignment);
1650  }
1651
1652  if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
1653    // Check if this is a global variable.
1654    if (VD->hasExternalStorage() || VD->isFileVarDecl())
1655      return EmitGlobalVarDeclLValue(*this, E, VD);
1656
1657    bool isBlockVariable = VD->hasAttr<BlocksAttr>();
1658
1659    bool NonGCable = VD->hasLocalStorage() &&
1660                     !VD->getType()->isReferenceType() &&
1661                     !isBlockVariable;
1662
1663    llvm::Value *V = LocalDeclMap[VD];
1664    if (!V && VD->isStaticLocal())
1665      V = CGM.getStaticLocalDeclAddress(VD);
1666
1667    // Use special handling for lambdas.
1668    if (!V) {
1669      if (FieldDecl *FD = LambdaCaptureFields.lookup(VD)) {
1670        QualType LambdaTagType = getContext().getTagDeclType(FD->getParent());
1671        LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue,
1672                                                     LambdaTagType);
1673        return EmitLValueForField(LambdaLV, FD);
1674      }
1675
1676      assert(isa<BlockDecl>(CurCodeDecl) && E->refersToEnclosingLocal());
1677      return MakeAddrLValue(GetAddrOfBlockDecl(VD, isBlockVariable),
1678                            T, Alignment);
1679    }
1680
1681    assert(V && "DeclRefExpr not entered in LocalDeclMap?");
1682
1683    if (isBlockVariable)
1684      V = BuildBlockByrefAddress(V, VD);
1685
1686    LValue LV;
1687    if (VD->getType()->isReferenceType()) {
1688      llvm::LoadInst *LI = Builder.CreateLoad(V);
1689      LI->setAlignment(Alignment.getQuantity());
1690      V = LI;
1691      LV = MakeNaturalAlignAddrLValue(V, T);
1692    } else {
1693      LV = MakeAddrLValue(V, T, Alignment);
1694    }
1695
1696    if (NonGCable) {
1697      LV.getQuals().removeObjCGCAttr();
1698      LV.setNonGC(true);
1699    }
1700    setObjCGCLValueClass(getContext(), E, LV);
1701    return LV;
1702  }
1703
1704  if (const FunctionDecl *fn = dyn_cast<FunctionDecl>(ND))
1705    return EmitFunctionDeclLValue(*this, E, fn);
1706
1707  llvm_unreachable("Unhandled DeclRefExpr");
1708}
1709
1710LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
1711  // __extension__ doesn't affect lvalue-ness.
1712  if (E->getOpcode() == UO_Extension)
1713    return EmitLValue(E->getSubExpr());
1714
1715  QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
1716  switch (E->getOpcode()) {
1717  default: llvm_unreachable("Unknown unary operator lvalue!");
1718  case UO_Deref: {
1719    QualType T = E->getSubExpr()->getType()->getPointeeType();
1720    assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
1721
1722    LValue LV = MakeNaturalAlignAddrLValue(EmitScalarExpr(E->getSubExpr()), T);
1723    LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());
1724
1725    // We should not generate a __weak write barrier on an indirect reference
1726    // to a pointer to an object; as in void foo (__weak id *param); *param = 0;
1727    // But we continue to generate a __strong write barrier on an indirect
1728    // write into a pointer to an object.
1729    if (getLangOpts().ObjC1 &&
1730        getLangOpts().getGC() != LangOptions::NonGC &&
1731        LV.isObjCWeak())
1732      LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
1733    return LV;
1734  }
1735  case UO_Real:
1736  case UO_Imag: {
1737    LValue LV = EmitLValue(E->getSubExpr());
1738    assert(LV.isSimple() && "real/imag on non-ordinary l-value");
1739    llvm::Value *Addr = LV.getAddress();
1740
1741    // __real is valid on scalars.  This is a faster way of testing that.
1742    // __imag can only produce an rvalue on scalars.
1743    if (E->getOpcode() == UO_Real &&
1744        !cast<llvm::PointerType>(Addr->getType())
1745           ->getElementType()->isStructTy()) {
1746      assert(E->getSubExpr()->getType()->isArithmeticType());
1747      return LV;
1748    }
1749
1750    assert(E->getSubExpr()->getType()->isAnyComplexType());
1751
1752    unsigned Idx = E->getOpcode() == UO_Imag;
1753    return MakeAddrLValue(Builder.CreateStructGEP(LV.getAddress(),
1754                                                  Idx, "idx"),
1755                          ExprTy);
1756  }
1757  case UO_PreInc:
1758  case UO_PreDec: {
1759    LValue LV = EmitLValue(E->getSubExpr());
1760    bool isInc = E->getOpcode() == UO_PreInc;
1761
1762    if (E->getType()->isAnyComplexType())
1763      EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
1764    else
1765      EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
1766    return LV;
1767  }
1768  }
1769}
1770
1771LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
1772  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
1773                        E->getType());
1774}
1775
1776LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
1777  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
1778                        E->getType());
1779}
1780
1781static llvm::Constant*
1782GetAddrOfConstantWideString(StringRef Str,
1783                            const char *GlobalName,
1784                            ASTContext &Context,
1785                            QualType Ty, SourceLocation Loc,
1786                            CodeGenModule &CGM) {
1787
1788  StringLiteral *SL = StringLiteral::Create(Context,
1789                                            Str,
1790                                            StringLiteral::Wide,
1791                                            /*Pascal = */false,
1792                                            Ty, Loc);
1793  llvm::Constant *C = CGM.GetConstantArrayFromStringLiteral(SL);
1794  llvm::GlobalVariable *GV =
1795    new llvm::GlobalVariable(CGM.getModule(), C->getType(),
1796                             !CGM.getLangOpts().WritableStrings,
1797                             llvm::GlobalValue::PrivateLinkage,
1798                             C, GlobalName);
1799  const unsigned WideAlignment =
1800    Context.getTypeAlignInChars(Ty).getQuantity();
1801  GV->setAlignment(WideAlignment);
1802  return GV;
1803}
1804
1805static void ConvertUTF8ToWideString(unsigned CharByteWidth, StringRef Source,
1806                                    SmallString<32>& Target) {
1807  Target.resize(CharByteWidth * (Source.size() + 1));
1808  char *ResultPtr = &Target[0];
1809  const UTF8 *ErrorPtr;
1810  bool success = ConvertUTF8toWide(CharByteWidth, Source, ResultPtr, ErrorPtr);
1811  (void)success;
1812  assert(success);
1813  Target.resize(ResultPtr - &Target[0]);
1814}
1815
1816LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
1817  switch (E->getIdentType()) {
1818  default:
1819    return EmitUnsupportedLValue(E, "predefined expression");
1820
1821  case PredefinedExpr::Func:
1822  case PredefinedExpr::Function:
1823  case PredefinedExpr::LFunction:
1824  case PredefinedExpr::PrettyFunction: {
1825    unsigned IdentType = E->getIdentType();
1826    std::string GlobalVarName;
1827
1828    switch (IdentType) {
1829    default: llvm_unreachable("Invalid type");
1830    case PredefinedExpr::Func:
1831      GlobalVarName = "__func__.";
1832      break;
1833    case PredefinedExpr::Function:
1834      GlobalVarName = "__FUNCTION__.";
1835      break;
1836    case PredefinedExpr::LFunction:
1837      GlobalVarName = "L__FUNCTION__.";
1838      break;
1839    case PredefinedExpr::PrettyFunction:
1840      GlobalVarName = "__PRETTY_FUNCTION__.";
1841      break;
1842    }
1843
1844    StringRef FnName = CurFn->getName();
1845    if (FnName.startswith("\01"))
1846      FnName = FnName.substr(1);
1847    GlobalVarName += FnName;
1848
1849    const Decl *CurDecl = CurCodeDecl;
1850    if (CurDecl == 0)
1851      CurDecl = getContext().getTranslationUnitDecl();
1852
1853    std::string FunctionName =
1854        (isa<BlockDecl>(CurDecl)
1855         ? FnName.str()
1856         : PredefinedExpr::ComputeName((PredefinedExpr::IdentType)IdentType,
1857                                       CurDecl));
1858
1859    const Type* ElemType = E->getType()->getArrayElementTypeNoTypeQual();
1860    llvm::Constant *C;
1861    if (ElemType->isWideCharType()) {
1862      SmallString<32> RawChars;
1863      ConvertUTF8ToWideString(
1864          getContext().getTypeSizeInChars(ElemType).getQuantity(),
1865          FunctionName, RawChars);
1866      C = GetAddrOfConstantWideString(RawChars,
1867                                      GlobalVarName.c_str(),
1868                                      getContext(),
1869                                      E->getType(),
1870                                      E->getLocation(),
1871                                      CGM);
1872    } else {
1873      C = CGM.GetAddrOfConstantCString(FunctionName,
1874                                       GlobalVarName.c_str(),
1875                                       1);
1876    }
1877    return MakeAddrLValue(C, E->getType());
1878  }
1879  }
1880}
1881
1882/// Emit a type description suitable for use by a runtime sanitizer library. The
1883/// format of a type descriptor is
1884///
1885/// \code
1886///   { i16 TypeKind, i16 TypeInfo }
1887/// \endcode
1888///
1889/// followed by an array of i8 containing the type name. TypeKind is 0 for an
1890/// integer, 1 for a floating point value, and -1 for anything else.
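///
/// For example, for a 32-bit signed 'int' the descriptor's TypeKind is 0 and
/// its TypeInfo is (log2(32) << 1) | 1 == 11.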
1891llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
1892  // FIXME: Only emit each type's descriptor once.
1893  uint16_t TypeKind = -1;
1894  uint16_t TypeInfo = 0;
1895
1896  if (T->isIntegerType()) {
1897    TypeKind = 0;
1898    TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
1899               (T->isSignedIntegerType() ? 1 : 0);
1900  } else if (T->isFloatingType()) {
1901    TypeKind = 1;
1902    TypeInfo = getContext().getTypeSize(T);
1903  }
1904
1905  // Format the type name as if for a diagnostic, including quotes and
1906  // optionally an 'aka'.
1907  llvm::SmallString<32> Buffer;
1908  CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype,
1909                                    (intptr_t)T.getAsOpaquePtr(),
1910                                    0, 0, 0, 0, 0, 0, Buffer,
1911                                    ArrayRef<intptr_t>());
1912
1913  llvm::Constant *Components[] = {
1914    Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
1915    llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)
1916  };
1917  llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);
1918
1919  llvm::GlobalVariable *GV =
1920    new llvm::GlobalVariable(CGM.getModule(), Descriptor->getType(),
1921                             /*isConstant=*/true,
1922                             llvm::GlobalVariable::PrivateLinkage,
1923                             Descriptor);
1924  GV->setUnnamedAddr(true);
1925  return GV;
1926}
1927
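/// EmitCheckValue - For instance, an i32 operand is zero-extended to the
/// target's intptr_t, while (as a sketch) a 'double' operand is spilled to a
/// stack slot and its address is passed instead.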
1928llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
1929  llvm::Type *TargetTy = IntPtrTy;
1930
1931  // Integers which fit in intptr_t are zero-extended and passed directly.
1932  if (V->getType()->isIntegerTy() &&
1933      V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
1934    return Builder.CreateZExt(V, TargetTy);
1935
1936  // Pointers are passed directly, everything else is passed by address.
1937  if (!V->getType()->isPointerTy()) {
1938    llvm::Value *Ptr = Builder.CreateAlloca(V->getType());
1939    Builder.CreateStore(V, Ptr);
1940    V = Ptr;
1941  }
1942  return Builder.CreatePtrToInt(V, TargetTy);
1943}
1944
1945/// \brief Emit a representation of a SourceLocation for passing to a handler
1946/// in a sanitizer runtime library. The format for this data is:
1947/// \code
1948///   struct SourceLocation {
1949///     const char *Filename;
1950///     int32_t Line, Column;
1951///   };
1952/// \endcode
1953/// For an invalid SourceLocation, the Filename pointer is null.
1954llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
1955  PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc);
1956
1957  llvm::Constant *Data[] = {
1958    // FIXME: Only emit each file name once.
1959    PLoc.isValid() ? cast<llvm::Constant>(
1960                       Builder.CreateGlobalStringPtr(PLoc.getFilename()))
1961                   : llvm::Constant::getNullValue(Int8PtrTy),
1962    Builder.getInt32(PLoc.getLine()),
1963    Builder.getInt32(PLoc.getColumn())
1964  };
1965
1966  return llvm::ConstantStruct::getAnon(Data);
1967}
1968
1969void CodeGenFunction::EmitCheck(llvm::Value *Checked, StringRef CheckName,
1970                                llvm::ArrayRef<llvm::Constant *> StaticArgs,
1971                                llvm::ArrayRef<llvm::Value *> DynamicArgs,
1972                                CheckRecoverableKind RecoverKind) {
1973  llvm::BasicBlock *Cont = createBasicBlock("cont");
1974
1975  llvm::BasicBlock *Handler = createBasicBlock("handler." + CheckName);
1976  Builder.CreateCondBr(Checked, Cont, Handler);
1977  EmitBlock(Handler);
1978
1979  llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
1980  llvm::GlobalValue *InfoPtr =
1981      new llvm::GlobalVariable(CGM.getModule(), Info->getType(), true,
1982                               llvm::GlobalVariable::PrivateLinkage, Info);
1983  InfoPtr->setUnnamedAddr(true);
1984
1985  llvm::SmallVector<llvm::Value *, 4> Args;
1986  llvm::SmallVector<llvm::Type *, 4> ArgTypes;
1987  Args.reserve(DynamicArgs.size() + 1);
1988  ArgTypes.reserve(DynamicArgs.size() + 1);
1989
1990  // Handler functions take an i8* pointing to the (handler-specific) static
1991  // information block, followed by a sequence of intptr_t arguments
1992  // representing operand values.
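  // As an illustrative (not normative) sketch, with hypothetical names, a
  // handler might be declared as:
  //   void __ubsan_handle_foo(FooData *Data, uintptr_t Arg0, uintptr_t Arg1);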
1993  Args.push_back(Builder.CreateBitCast(InfoPtr, Int8PtrTy));
1994  ArgTypes.push_back(Int8PtrTy);
1995  for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) {
1996    Args.push_back(EmitCheckValue(DynamicArgs[i]));
1997    ArgTypes.push_back(IntPtrTy);
1998  }
1999
2000  bool Recover = (RecoverKind == CRK_AlwaysRecoverable) ||
2001                 ((RecoverKind == CRK_Recoverable) &&
2002                   CGM.getCodeGenOpts().SanitizeRecover);
2003
2004  llvm::FunctionType *FnType =
2005    llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);
2006  llvm::AttrBuilder B;
2007  if (!Recover) {
2008    B.addAttribute(llvm::Attributes::NoReturn)
2009     .addAttribute(llvm::Attributes::NoUnwind);
2010  }
2011  B.addAttribute(llvm::Attributes::UWTable);
2012
2013  // Checks that have two variants use a suffix to differentiate them
2014  bool NeedsAbortSuffix = (RecoverKind != CRK_Unrecoverable) &&
2015                           !CGM.getCodeGenOpts().SanitizeRecover;
2016  std::string FunctionName = ("__ubsan_handle_" + CheckName +
2017                              (NeedsAbortSuffix? "_abort" : "")).str();
2018  llvm::Value *Fn =
2019    CGM.CreateRuntimeFunction(FnType, FunctionName,
2020                              llvm::Attributes::get(getLLVMContext(), B));
2021  llvm::CallInst *HandlerCall = Builder.CreateCall(Fn, Args);
2022  if (Recover) {
2023    Builder.CreateBr(Cont);
2024  } else {
2025    HandlerCall->setDoesNotReturn();
2026    HandlerCall->setDoesNotThrow();
2027    Builder.CreateUnreachable();
2028  }
2029
2030  EmitBlock(Cont);
2031}
2032
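/// EmitTrapvCheck - e.g. for -ftrapv overflow checks: branch to a trap block
/// on failure; when optimizing, all such checks in a function share a single
/// trap block to save code size.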
2033void CodeGenFunction::EmitTrapvCheck(llvm::Value *Checked) {
2034  llvm::BasicBlock *Cont = createBasicBlock("cont");
2035
2036  // If we're optimizing, collapse all calls to trap down to just one per
2037  // function to save on code size.
2038  if (!CGM.getCodeGenOpts().OptimizationLevel || !TrapBB) {
2039    TrapBB = createBasicBlock("trap");
2040    Builder.CreateCondBr(Checked, Cont, TrapBB);
2041    EmitBlock(TrapBB);
2042    llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap);
2043    llvm::CallInst *TrapCall = Builder.CreateCall(F);
2044    TrapCall->setDoesNotReturn();
2045    TrapCall->setDoesNotThrow();
2046    Builder.CreateUnreachable();
2047  } else {
2048    Builder.CreateCondBr(Checked, Cont, TrapBB);
2049  }
2050
2051  EmitBlock(Cont);
2052}
2053
2054/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
2055/// array to pointer, return the array subexpression.
2056static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
2057  // If this isn't just an array->pointer decay, bail out.
2058  const CastExpr *CE = dyn_cast<CastExpr>(E);
2059  if (CE == 0 || CE->getCastKind() != CK_ArrayToPointerDecay)
2060    return 0;
2061
2062  // If this is a decay from a variable-width array, bail out.
2063  const Expr *SubExpr = CE->getSubExpr();
2064  if (SubExpr->getType()->isVariableArrayType())
2065    return 0;
2066
2067  return SubExpr;
2068}
2069
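/// EmitArraySubscriptExpr - For a simple decayed array base, e.g. the
/// (hypothetical) 'int A[10]; A[i]', this emits a single "gep A, 0, i"
/// rather than "gep A, 0, 0" followed by "gep x, i"; see the decay case
/// below.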
2070LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
2071  // The index must always be an integer, which is not an aggregate.  Emit it.
2072  llvm::Value *Idx = EmitScalarExpr(E->getIdx());
2073  QualType IdxTy  = E->getIdx()->getType();
2074  bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
2075
2076  // If the base is a vector type, then we are forming a vector element lvalue
2077  // with this subscript.
2078  if (E->getBase()->getType()->isVectorType()) {
2079    // Emit the vector as an lvalue to get its address.
2080    LValue LHS = EmitLValue(E->getBase());
2081    assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
2082    Idx = Builder.CreateIntCast(Idx, Int32Ty, IdxSigned, "vidx");
2083    return LValue::MakeVectorElt(LHS.getAddress(), Idx,
2084                                 E->getBase()->getType(), LHS.getAlignment());
2085  }
2086
2087  // Extend or truncate the index type to 32 or 64 bits.
2088  if (Idx->getType() != IntPtrTy)
2089    Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
2090
2091  // We know that the pointer points to a type of the correct size, unless the
2092  // size is a VLA or Objective-C interface.
2093  llvm::Value *Address = 0;
2094  CharUnits ArrayAlignment;
2095  if (const VariableArrayType *vla =
2096        getContext().getAsVariableArrayType(E->getType())) {
2097    // The base must be a pointer, which is not an aggregate.  Emit
2098    // it.  It needs to be emitted first in case it's what captures
2099    // the VLA bounds.
2100    Address = EmitScalarExpr(E->getBase());
2101
2102    // The element count here is the total number of non-VLA elements.
2103    llvm::Value *numElements = getVLASize(vla).first;
2104
2105    // Effectively, the multiply by the VLA size is part of the GEP.
2106    // GEP indexes are signed, and scaling an index isn't permitted to
2107    // signed-overflow, so we use the same semantics for our explicit
2108    // multiply.  We suppress this if overflow is not undefined behavior.
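    // For example (hypothetical source), for 'int a[n][m];' the subscript
    // 'a[i]' has the VLA type 'int[m]', so the index is scaled by m here.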
2109    if (getLangOpts().isSignedOverflowDefined()) {
2110      Idx = Builder.CreateMul(Idx, numElements);
2111      Address = Builder.CreateGEP(Address, Idx, "arrayidx");
2112    } else {
2113      Idx = Builder.CreateNSWMul(Idx, numElements);
2114      Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx");
2115    }
2116  } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()) {
2117    // Indexing over an interface, as in "NSString *P; P[4];"
2118    llvm::Value *InterfaceSize =
2119      llvm::ConstantInt::get(Idx->getType(),
2120          getContext().getTypeSizeInChars(OIT).getQuantity());
2121
2122    Idx = Builder.CreateMul(Idx, InterfaceSize);
2123
2124    // The base must be a pointer, which is not an aggregate.  Emit it.
2125    llvm::Value *Base = EmitScalarExpr(E->getBase());
2126    Address = EmitCastToVoidPtr(Base);
2127    Address = Builder.CreateGEP(Address, Idx, "arrayidx");
2128    Address = Builder.CreateBitCast(Address, Base->getType());
2129  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
2130    // If this is A[i] where A is an array, the frontend will have decayed the
2131    // base to be an ArrayToPointerDecay implicit cast.  While correct, it is
2132    // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
2133    // "gep x, i" here.  Emit one "gep A, 0, i".
2134    assert(Array->getType()->isArrayType() &&
2135           "Array to pointer decay must have array source type!");
2136    LValue ArrayLV = EmitLValue(Array);
2137    llvm::Value *ArrayPtr = ArrayLV.getAddress();
2138    llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
2139    llvm::Value *Args[] = { Zero, Idx };
2140
2141    // Propagate the alignment from the array itself to the result.
2142    ArrayAlignment = ArrayLV.getAlignment();
2143
2144    if (getLangOpts().isSignedOverflowDefined())
2145      Address = Builder.CreateGEP(ArrayPtr, Args, "arrayidx");
2146    else
2147      Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx");
2148  } else {
2149    // The base must be a pointer, which is not an aggregate.  Emit it.
2150    llvm::Value *Base = EmitScalarExpr(E->getBase());
2151    if (getLangOpts().isSignedOverflowDefined())
2152      Address = Builder.CreateGEP(Base, Idx, "arrayidx");
2153    else
2154      Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
2155  }
2156
2157  QualType T = E->getBase()->getType()->getPointeeType();
2158  assert(!T.isNull() &&
2159         "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");
2160
2161
2162  // Limit the alignment to that of the result type.
2163  LValue LV;
2164  if (!ArrayAlignment.isZero()) {
2165    CharUnits Align = getContext().getTypeAlignInChars(T);
2166    ArrayAlignment = std::min(Align, ArrayAlignment);
2167    LV = MakeAddrLValue(Address, T, ArrayAlignment);
2168  } else {
2169    LV = MakeNaturalAlignAddrLValue(Address, T);
2170  }
2171
2172  LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace());
2173
2174  if (getLangOpts().ObjC1 &&
2175      getLangOpts().getGC() != LangOptions::NonGC) {
2176    LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
2177    setObjCGCLValueClass(getContext(), E, LV);
2178  }
2179  return LV;
2180}
2181
2182static
2183llvm::Constant *GenerateConstantVector(CGBuilderTy &Builder,
2184                                       SmallVector<unsigned, 4> &Elts) {
2185  SmallVector<llvm::Constant*, 4> CElts;
2186  for (unsigned i = 0, e = Elts.size(); i != e; ++i)
2187    CElts.push_back(Builder.getInt32(Elts[i]));
2188
2189  return llvm::ConstantVector::get(CElts);
2190}
2191
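/// EmitExtVectorElementExpr - e.g. for a hypothetical 'float4 v; ... v.xy',
/// this produces an ext-vector-element lvalue that records v's address plus
/// the constant index list <0, 1>.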
2192LValue CodeGenFunction::
2193EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
2194  // Emit the base vector as an l-value.
2195  LValue Base;
2196
2197  // ExtVectorElementExpr's base can either be a vector or pointer to vector.
2198  if (E->isArrow()) {
2199    // If it is a pointer to a vector, emit the address and form an lvalue with
2200    // it.
2201    llvm::Value *Ptr = EmitScalarExpr(E->getBase());
2202    const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
2203    Base = MakeAddrLValue(Ptr, PT->getPointeeType());
2204    Base.getQuals().removeObjCGCAttr();
2205  } else if (E->getBase()->isGLValue()) {
2206    // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
2207    // emit the base as an lvalue.
2208    assert(E->getBase()->getType()->isVectorType());
2209    Base = EmitLValue(E->getBase());
2210  } else {
2211    // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
2212    assert(E->getBase()->getType()->isVectorType() &&
2213           "Result must be a vector");
2214    llvm::Value *Vec = EmitScalarExpr(E->getBase());
2215
2216    // Store the vector to memory (because LValue wants an address).
2217    llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType());
2218    Builder.CreateStore(Vec, VecMem);
2219    Base = MakeAddrLValue(VecMem, E->getBase()->getType());
2220  }
2221
2222  QualType type =
2223    E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
2224
2225  // Encode the element access list into a vector of unsigned indices.
2226  SmallVector<unsigned, 4> Indices;
2227  E->getEncodedElementAccess(Indices);
2228
2229  if (Base.isSimple()) {
2230    llvm::Constant *CV = GenerateConstantVector(Builder, Indices);
2231    return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
2232                                    Base.getAlignment());
2233  }
2234  assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
2235
2236  llvm::Constant *BaseElts = Base.getExtVectorElts();
2237  SmallVector<llvm::Constant *, 4> CElts;
2238
2239  for (unsigned i = 0, e = Indices.size(); i != e; ++i)
2240    CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
2241  llvm::Constant *CV = llvm::ConstantVector::get(CElts);
2242  return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, type,
2243                                  Base.getAlignment());
2244}
2245
2246LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
2247  Expr *BaseExpr = E->getBase();
2248
2249  // If this is s.x, emit s as an lvalue.  If it is s->x, emit s as a scalar.
2250  LValue BaseLV;
2251  if (E->isArrow()) {
2252    llvm::Value *Ptr = EmitScalarExpr(BaseExpr);
2253    QualType PtrTy = BaseExpr->getType()->getPointeeType();
2254    EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Ptr, PtrTy);
2255    BaseLV = MakeNaturalAlignAddrLValue(Ptr, PtrTy);
2256  } else
2257    BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);
2258
2259  NamedDecl *ND = E->getMemberDecl();
2260  if (FieldDecl *Field = dyn_cast<FieldDecl>(ND)) {
2261    LValue LV = EmitLValueForField(BaseLV, Field);
2262    setObjCGCLValueClass(getContext(), E, LV);
2263    return LV;
2264  }
2265
2266  if (VarDecl *VD = dyn_cast<VarDecl>(ND))
2267    return EmitGlobalVarDeclLValue(*this, E, VD);
2268
2269  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
2270    return EmitFunctionDeclLValue(*this, E, FD);
2271
2272  llvm_unreachable("Unhandled member declaration!");
2273}
2274
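/// EmitLValueForField - As a sketch (hypothetical source), for
/// 'struct S { int x; } *p; ... p->x' this GEPs from the base address to the
/// field and carries the base's volatile/restrict qualifiers and capped
/// alignment over to the field's lvalue.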
2275LValue CodeGenFunction::EmitLValueForField(LValue base,
2276                                           const FieldDecl *field) {
2277  if (field->isBitField()) {
2278    const CGRecordLayout &RL =
2279      CGM.getTypes().getCGRecordLayout(field->getParent());
2280    const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
2281    llvm::Value *Addr = base.getAddress();
2282    unsigned Idx = RL.getLLVMFieldNo(field);
2283    if (Idx != 0)
2284      // For structs, we GEP to the field that the record layout suggests.
2285      Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
2286    // Get the access type.
2287    llvm::Type *PtrTy = llvm::Type::getIntNPtrTy(
2288      getLLVMContext(), Info.StorageSize,
2289      CGM.getContext().getTargetAddressSpace(base.getType()));
2290    if (Addr->getType() != PtrTy)
2291      Addr = Builder.CreateBitCast(Addr, PtrTy);
2292
2293    QualType fieldType =
2294      field->getType().withCVRQualifiers(base.getVRQualifiers());
2295    return LValue::MakeBitfield(Addr, Info, fieldType, base.getAlignment());
2296  }
2297
2298  const RecordDecl *rec = field->getParent();
2299  QualType type = field->getType();
2300  CharUnits alignment = getContext().getDeclAlign(field);
2301
2302  // FIXME: It should be impossible to have an LValue without alignment for a
2303  // complete type.
2304  if (!base.getAlignment().isZero())
2305    alignment = std::min(alignment, base.getAlignment());
2306
2307  bool mayAlias = rec->hasAttr<MayAliasAttr>();
2308
2309  llvm::Value *addr = base.getAddress();
2310  unsigned cvr = base.getVRQualifiers();
2311  if (rec->isUnion()) {
2312    // For unions, there is no pointer adjustment.
2313    assert(!type->isReferenceType() && "union has reference member");
2314  } else {
2315    // For structs, we GEP to the field that the record layout suggests.
2316    unsigned idx = CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
2317    addr = Builder.CreateStructGEP(addr, idx, field->getName());
2318
2319    // If this is a reference field, load the reference right now.
2320    if (const ReferenceType *refType = type->getAs<ReferenceType>()) {
2321      llvm::LoadInst *load = Builder.CreateLoad(addr, "ref");
2322      if (cvr & Qualifiers::Volatile) load->setVolatile(true);
2323      load->setAlignment(alignment.getQuantity());
2324
2325      if (CGM.shouldUseTBAA()) {
2326        llvm::MDNode *tbaa;
2327        if (mayAlias)
2328          tbaa = CGM.getTBAAInfo(getContext().CharTy);
2329        else
2330          tbaa = CGM.getTBAAInfo(type);
2331        CGM.DecorateInstruction(load, tbaa);
2332      }
2333
2334      addr = load;
2335      mayAlias = false;
2336      type = refType->getPointeeType();
2337      if (type->isIncompleteType())
2338        alignment = CharUnits();
2339      else
2340        alignment = getContext().getTypeAlignInChars(type);
2341      cvr = 0; // qualifiers don't recursively apply to referencee
2342    }
2343  }
2344
2345  // Make sure that the address is pointing to the right type.  This is critical
2346  // for both unions and structs.  A union needs a bitcast, a struct element
2347  // will need a bitcast if the laid-out LLVM type doesn't match the
2348  // desired type.
2349  addr = EmitBitCastOfLValueToProperType(*this, addr,
2350                                         CGM.getTypes().ConvertTypeForMem(type),
2351                                         field->getName());
2352
2353  if (field->hasAttr<AnnotateAttr>())
2354    addr = EmitFieldAnnotations(field, addr);
2355
2356  LValue LV = MakeAddrLValue(addr, type, alignment);
2357  LV.getQuals().addCVRQualifiers(cvr);
2358
2359  // __weak attribute on a field is ignored.
2360  if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
2361    LV.getQuals().removeObjCGCAttr();
2362
2363  // Fields of may_alias structs act like 'char' for TBAA purposes.
2364  // FIXME: this should get propagated down through anonymous structs
2365  // and unions.
2366  if (mayAlias && LV.getTBAAInfo())
2367    LV.setTBAAInfo(CGM.getTBAAInfo(getContext().CharTy));
2368
2369  return LV;
2370}
2371
2372LValue
2373CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
2374                                                  const FieldDecl *Field) {
2375  QualType FieldType = Field->getType();
2376
2377  if (!FieldType->isReferenceType())
2378    return EmitLValueForField(Base, Field);
2379
2380  const CGRecordLayout &RL =
2381    CGM.getTypes().getCGRecordLayout(Field->getParent());
2382  unsigned idx = RL.getLLVMFieldNo(Field);
2383  llvm::Value *V = Builder.CreateStructGEP(Base.getAddress(), idx);
2384  assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs");
2385
2386  // Make sure that the address is pointing to the right type.  This is critical
2387  // for both unions and structs.  A union needs a bitcast, a struct element
2388  // will need a bitcast if the laid-out LLVM type doesn't match the
2389  // desired type.
2390  llvm::Type *llvmType = ConvertTypeForMem(FieldType);
2391  V = EmitBitCastOfLValueToProperType(*this, V, llvmType, Field->getName());
2392
2393  CharUnits Alignment = getContext().getDeclAlign(Field);
2394
2395  // FIXME: It should be impossible to have an LValue without alignment for a
2396  // complete type.
2397  if (!Base.getAlignment().isZero())
2398    Alignment = std::min(Alignment, Base.getAlignment());
2399
2400  return MakeAddrLValue(V, FieldType, Alignment);
2401}
2402
2403LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
2404  if (E->isFileScope()) {
2405    llvm::Value *GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
2406    return MakeAddrLValue(GlobalPtr, E->getType());
2407  }
2408  if (E->getType()->isVariablyModifiedType())
2409    // make sure to emit the VLA size.
2410    EmitVariablyModifiedType(E->getType());
2411
2412  llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
2413  const Expr *InitExpr = E->getInitializer();
2414  LValue Result = MakeAddrLValue(DeclPtr, E->getType());
2415
2416  EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
2417                   /*Init*/ true);
2418
2419  return Result;
2420}
2421
2422LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) {
2423  if (!E->isGLValue())
2424    // Initializing an aggregate temporary in C++11: T{...}.
2425    return EmitAggExprToLValue(E);
2426
2427  // An lvalue initializer list must be initializing a reference.
2428  assert(E->getNumInits() == 1 && "reference init with multiple values");
2429  return EmitLValue(E->getInit(0));
2430}
2431
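/// EmitConditionalOperatorLValue - For a glvalue conditional such as the
/// (hypothetical) C++ assignment '(cond ? x : y) = v;', branch on the
/// condition, emit each arm as an lvalue, and PHI the two addresses together.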
2432LValue CodeGenFunction::
2433EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
2434  if (!expr->isGLValue()) {
2435    // ?: here should be an aggregate.
2436    assert((hasAggregateLLVMType(expr->getType()) &&
2437            !expr->getType()->isAnyComplexType()) &&
2438           "Unexpected conditional operator!");
2439    return EmitAggExprToLValue(expr);
2440  }
2441
2442  OpaqueValueMapping binding(*this, expr);
2443
2444  const Expr *condExpr = expr->getCond();
2445  bool CondExprBool;
2446  if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
2447    const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr();
2448    if (!CondExprBool) std::swap(live, dead);
2449
2450    if (!ContainsLabel(dead))
2451      return EmitLValue(live);
2452  }
2453
2454  llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true");
2455  llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false");
2456  llvm::BasicBlock *contBlock = createBasicBlock("cond.end");
2457
2458  ConditionalEvaluation eval(*this);
2459  EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock);
2460
2461  // Any temporaries created here are conditional.
2462  EmitBlock(lhsBlock);
2463  eval.begin(*this);
2464  LValue lhs = EmitLValue(expr->getTrueExpr());
2465  eval.end(*this);
2466
2467  if (!lhs.isSimple())
2468    return EmitUnsupportedLValue(expr, "conditional operator");
2469
2470  lhsBlock = Builder.GetInsertBlock();
2471  Builder.CreateBr(contBlock);
2472
2473  // Any temporaries created here are conditional.
2474  EmitBlock(rhsBlock);
2475  eval.begin(*this);
2476  LValue rhs = EmitLValue(expr->getFalseExpr());
2477  eval.end(*this);
2478  if (!rhs.isSimple())
2479    return EmitUnsupportedLValue(expr, "conditional operator");
2480  rhsBlock = Builder.GetInsertBlock();
2481
2482  EmitBlock(contBlock);
2483
2484  llvm::PHINode *phi = Builder.CreatePHI(lhs.getAddress()->getType(), 2,
2485                                         "cond-lvalue");
2486  phi->addIncoming(lhs.getAddress(), lhsBlock);
2487  phi->addIncoming(rhs.getAddress(), rhsBlock);
2488  return MakeAddrLValue(phi, expr->getType());
2489}
2490
2491/// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
2492/// type. If the cast is to a reference, we can have the usual lvalue result,
2493/// otherwise if a cast is needed by the code generator in an lvalue context,
2494/// then it must mean that we need the address of an aggregate in order to
2495/// access one of its members.  This can happen for all the reasons that casts
2496/// are permitted with an aggregate result, including noop aggregate casts,
2497/// and casts from scalar to union.
2498LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
2499  switch (E->getCastKind()) {
2500  case CK_ToVoid:
2501    return EmitUnsupportedLValue(E, "unexpected cast lvalue");
2502
2503  case CK_Dependent:
2504    llvm_unreachable("dependent cast kind in IR gen!");
2505
2506  case CK_BuiltinFnToFnPtr:
2507    llvm_unreachable("builtin functions are handled elsewhere");
2508
2509  // These two casts are currently treated as no-ops, although they could
2510  // potentially be real operations depending on the target's ABI.
2511  case CK_NonAtomicToAtomic:
2512  case CK_AtomicToNonAtomic:
2513
2514  case CK_NoOp:
2515  case CK_LValueToRValue:
2516    if (!E->getSubExpr()->Classify(getContext()).isPRValue()
2517        || E->getType()->isRecordType())
2518      return EmitLValue(E->getSubExpr());
2519    // Fall through to synthesize a temporary.
2520
2521  case CK_BitCast:
2522  case CK_ArrayToPointerDecay:
2523  case CK_FunctionToPointerDecay:
2524  case CK_NullToMemberPointer:
2525  case CK_NullToPointer:
2526  case CK_IntegralToPointer:
2527  case CK_PointerToIntegral:
2528  case CK_PointerToBoolean:
2529  case CK_VectorSplat:
2530  case CK_IntegralCast:
2531  case CK_IntegralToBoolean:
2532  case CK_IntegralToFloating:
2533  case CK_FloatingToIntegral:
2534  case CK_FloatingToBoolean:
2535  case CK_FloatingCast:
2536  case CK_FloatingRealToComplex:
2537  case CK_FloatingComplexToReal:
2538  case CK_FloatingComplexToBoolean:
2539  case CK_FloatingComplexCast:
2540  case CK_FloatingComplexToIntegralComplex:
2541  case CK_IntegralRealToComplex:
2542  case CK_IntegralComplexToReal:
2543  case CK_IntegralComplexToBoolean:
2544  case CK_IntegralComplexCast:
2545  case CK_IntegralComplexToFloatingComplex:
2546  case CK_DerivedToBaseMemberPointer:
2547  case CK_BaseToDerivedMemberPointer:
2548  case CK_MemberPointerToBoolean:
2549  case CK_ReinterpretMemberPointer:
2550  case CK_AnyPointerToBlockPointerCast:
2551  case CK_ARCProduceObject:
2552  case CK_ARCConsumeObject:
2553  case CK_ARCReclaimReturnedObject:
2554  case CK_ARCExtendBlockObject:
2555  case CK_CopyAndAutoreleaseBlockObject: {
2556    // These casts only produce lvalues when we're binding a reference to a
2557    // temporary realized from a (converted) pure rvalue. Emit the expression
2558    // as a value, copy it into a temporary, and return an lvalue referring to
2559    // that temporary.
2560    llvm::Value *V = CreateMemTemp(E->getType(), "ref.temp");
2561    EmitAnyExprToMem(E, V, E->getType().getQualifiers(), false);
2562    return MakeAddrLValue(V, E->getType());
2563  }
2564
2565  case CK_Dynamic: {
2566    LValue LV = EmitLValue(E->getSubExpr());
2567    llvm::Value *V = LV.getAddress();
2568    const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(E);
2569    return MakeAddrLValue(EmitDynamicCast(V, DCE), E->getType());
2570  }
2571
2572  case CK_ConstructorConversion:
2573  case CK_UserDefinedConversion:
2574  case CK_CPointerToObjCPointerCast:
2575  case CK_BlockPointerToObjCPointerCast:
2576    return EmitLValue(E->getSubExpr());
2577
2578  case CK_UncheckedDerivedToBase:
2579  case CK_DerivedToBase: {
2580    const RecordType *DerivedClassTy =
2581      E->getSubExpr()->getType()->getAs<RecordType>();
2582    CXXRecordDecl *DerivedClassDecl =
2583      cast<CXXRecordDecl>(DerivedClassTy->getDecl());
2584
2585    LValue LV = EmitLValue(E->getSubExpr());
2586    llvm::Value *This = LV.getAddress();
2587
2588    // Perform the derived-to-base conversion
2589    llvm::Value *Base =
2590      GetAddressOfBaseClass(This, DerivedClassDecl,
2591                            E->path_begin(), E->path_end(),
2592                            /*NullCheckValue=*/false);
2593
2594    return MakeAddrLValue(Base, E->getType());
2595  }
2596  case CK_ToUnion:
2597    return EmitAggExprToLValue(E);
2598  case CK_BaseToDerived: {
2599    const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>();
2600    CXXRecordDecl *DerivedClassDecl =
2601      cast<CXXRecordDecl>(DerivedClassTy->getDecl());
2602
2603    LValue LV = EmitLValue(E->getSubExpr());
2604
2605    // Perform the base-to-derived conversion
2606    llvm::Value *Derived =
2607      GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl,
2608                               E->path_begin(), E->path_end(),
2609                               /*NullCheckValue=*/false);
2610
2611    return MakeAddrLValue(Derived, E->getType());
2612  }
2613  case CK_LValueBitCast: {
2614    // This must be a reinterpret_cast (or c-style equivalent).
2615    const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E);
2616
2617    LValue LV = EmitLValue(E->getSubExpr());
2618    llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
2619                                           ConvertType(CE->getTypeAsWritten()));
2620    return MakeAddrLValue(V, E->getType());
2621  }
2622  case CK_ObjCObjectLValueCast: {
2623    LValue LV = EmitLValue(E->getSubExpr());
2624    QualType ToType = getContext().getLValueReferenceType(E->getType());
2625    llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
2626                                           ConvertType(ToType));
2627    return MakeAddrLValue(V, E->getType());
2628  }
2629  }
2630
2631  llvm_unreachable("Unhandled lvalue cast kind?");
2632}
2633
2634LValue CodeGenFunction::EmitNullInitializationLValue(
2635                                              const CXXScalarValueInitExpr *E) {
2636  QualType Ty = E->getType();
2637  LValue LV = MakeAddrLValue(CreateMemTemp(Ty), Ty);
2638  EmitNullInitialization(LV.getAddress(), Ty);
2639  return LV;
2640}
2641
2642LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
2643  assert(OpaqueValueMappingData::shouldBindAsLValue(e));
2644  return getOpaqueLValueMapping(e);
2645}
2646
2647LValue CodeGenFunction::EmitMaterializeTemporaryExpr(
2648                                           const MaterializeTemporaryExpr *E) {
2649  RValue RV = EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
2650  return MakeAddrLValue(RV.getScalarVal(), E->getType());
2651}

RValue CodeGenFunction::EmitRValueForField(LValue LV,
                                           const FieldDecl *FD) {
  QualType FT = FD->getType();
  LValue FieldLV = EmitLValueForField(LV, FD);
  if (FT->isAnyComplexType())
    return RValue::getComplex(
        LoadComplexFromAddr(FieldLV.getAddress(),
                            FieldLV.isVolatileQualified()));
  else if (CodeGenFunction::hasAggregateLLVMType(FT))
    return FieldLV.asAggregateRValue();

  return EmitLoadOfLValue(FieldLV);
}

//===--------------------------------------------------------------------===//
//                             Expression Emission
//===--------------------------------------------------------------------===//

RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
                                     ReturnValueSlot ReturnValue) {
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, E->getLocStart());

  // Builtins never have block type, so the check for block calls can
  // safely come first.
  if (E->getCallee()->getType()->isBlockPointerType())
    return EmitBlockCallExpr(E, ReturnValue);

  if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E))
    return EmitCXXMemberCallExpr(CE, ReturnValue);

  if (const CUDAKernelCallExpr *CE = dyn_cast<CUDAKernelCallExpr>(E))
    return EmitCUDAKernelCallExpr(CE, ReturnValue);

  const Decl *TargetDecl = E->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
    if (unsigned builtinID = FD->getBuiltinID())
      return EmitBuiltinExpr(FD, builtinID, E);
  }

  if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E))
    if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
      return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);

  if (const CXXPseudoDestructorExpr *PseudoDtor
          = dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
    QualType DestroyedType = PseudoDtor->getDestroyedType();
    if (getLangOpts().ObjCAutoRefCount &&
        DestroyedType->isObjCLifetimeType() &&
        (DestroyedType.getObjCLifetime() == Qualifiers::OCL_Strong ||
         DestroyedType.getObjCLifetime() == Qualifiers::OCL_Weak)) {
      // Automatic Reference Counting:
      //   If the pseudo-expression names a retainable object with weak or
      //   strong lifetime, the object shall be released.
      Expr *BaseExpr = PseudoDtor->getBase();
      llvm::Value *BaseValue = 0;
      Qualifiers BaseQuals;

      // If this is s->x, emit s as a scalar; if it is s.x, emit s as an
      // lvalue.
      if (PseudoDtor->isArrow()) {
        BaseValue = EmitScalarExpr(BaseExpr);
        const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>();
        BaseQuals = PTy->getPointeeType().getQualifiers();
      } else {
        LValue BaseLV = EmitLValue(BaseExpr);
        BaseValue = BaseLV.getAddress();
        QualType BaseTy = BaseExpr->getType();
        BaseQuals = BaseTy.getQualifiers();
      }

      switch (PseudoDtor->getDestroyedType().getObjCLifetime()) {
      case Qualifiers::OCL_None:
      case Qualifiers::OCL_ExplicitNone:
      case Qualifiers::OCL_Autoreleasing:
        break;

      case Qualifiers::OCL_Strong:
        EmitARCRelease(Builder.CreateLoad(BaseValue,
                          PseudoDtor->getDestroyedType().isVolatileQualified()),
                       /*precise*/ true);
        break;

      case Qualifiers::OCL_Weak:
        EmitARCDestroyWeak(BaseValue);
        break;
      }
    } else {
      // C++ [expr.pseudo]p1:
      //   The result shall only be used as the operand for the function call
      //   operator (), and the result of such a call has type void. The only
      //   effect is the evaluation of the postfix-expression before the dot or
      //   arrow.
      EmitScalarExpr(E->getCallee());
    }

    return RValue::get(0);
  }

  llvm::Value *Callee = EmitScalarExpr(E->getCallee());
  return EmitCall(E->getCallee()->getType(), Callee, ReturnValue,
                  E->arg_begin(), E->arg_end(), TargetDecl);
}
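
// As an example of the pseudo-destructor handling above: in
//   template <typename T> void destroy(T *p) { p->~T(); }
// an instantiation with T = int only evaluates 'p' (C++ [expr.pseudo]p1),
// while under ARC an instantiation with T = __strong id instead emits a
// release of the pointee, per the lifetime switch above.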

LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
  // Comma expressions just emit their LHS then their RHS as an l-value.
  if (E->getOpcode() == BO_Comma) {
    EmitIgnoredExpr(E->getLHS());
    EnsureInsertPoint();
    return EmitLValue(E->getRHS());
  }

  if (E->getOpcode() == BO_PtrMemD ||
      E->getOpcode() == BO_PtrMemI)
    return EmitPointerToDataMemberBinaryExpr(E);

  assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");

  // Note that in all of these cases, __block variables need the RHS
  // evaluated first just in case the variable gets moved by the RHS.

  if (!hasAggregateLLVMType(E->getType())) {
    switch (E->getLHS()->getType().getObjCLifetime()) {
    case Qualifiers::OCL_Strong:
      return EmitARCStoreStrong(E, /*ignored*/ false).first;

    case Qualifiers::OCL_Autoreleasing:
      return EmitARCStoreAutoreleasing(E).first;

    // No reason to do any of these differently.
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Weak:
      break;
    }

    RValue RV = EmitAnyExpr(E->getRHS());
    LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store);
    EmitStoreThroughLValue(RV, LV);
    return LV;
  }

  if (E->getType()->isAnyComplexType())
    return EmitComplexAssignmentLValue(E);

  return EmitAggExprToLValue(E);
}
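
// For example, in '(x = 10) += 5;' the inner assignment is itself needed as
// an l-value: '10' is emitted first, stored through the checked l-value for
// 'x', and that same LValue is handed back for the enclosing '+='.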

LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
  RValue RV = EmitCallExpr(E);

  if (!RV.isScalar())
    return MakeAddrLValue(RV.getAggregateAddr(), E->getType());

  assert(E->getCallReturnType()->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return MakeAddrLValue(RV.getScalarVal(), E->getType());
}

LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
  // FIXME: This shouldn't require another copy.
  return EmitAggExprToLValue(E);
}

LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
  assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
         && "binding l-value to type which needs a temporary");
  AggValueSlot Slot = CreateAggTemp(E->getType());
  EmitCXXConstructExpr(E, Slot);
  return MakeAddrLValue(Slot.getAddr(), E->getType());
}

LValue
CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
  return MakeAddrLValue(EmitCXXTypeidExpr(E), E->getType());
}

llvm::Value *CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
  return CGM.GetAddrOfUuidDescriptor(E);
}

LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) {
  return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType());
}

LValue
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
  AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
  Slot.setExternallyDestructed();
  EmitAggExpr(E->getSubExpr(), Slot);
  EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddr());
  return MakeAddrLValue(Slot.getAddr(), E->getType());
}

LValue
CodeGenFunction::EmitLambdaLValue(const LambdaExpr *E) {
  AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
  EmitLambdaExpr(E, Slot);
  return MakeAddrLValue(Slot.getAddr(), E->getType());
}

LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
  RValue RV = EmitObjCMessageExpr(E);

  if (!RV.isScalar())
    return MakeAddrLValue(RV.getAggregateAddr(), E->getType());

  assert(E->getMethodDecl()->getResultType()->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return MakeAddrLValue(RV.getScalarVal(), E->getType());
}

LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
  llvm::Value *V =
    CGM.getObjCRuntime().GetSelector(Builder, E->getSelector(), true);
  return MakeAddrLValue(V, E->getType());
}

llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
                                             const ObjCIvarDecl *Ivar) {
  return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
}

LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
                                          llvm::Value *BaseValue,
                                          const ObjCIvarDecl *Ivar,
                                          unsigned CVRQualifiers) {
  return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
                                                   Ivar, CVRQualifiers);
}

LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
  // FIXME: A lot of the code below could be shared with EmitMemberExpr.
  llvm::Value *BaseValue = 0;
  const Expr *BaseExpr = E->getBase();
  Qualifiers BaseQuals;
  QualType ObjectTy;
  if (E->isArrow()) {
    BaseValue = EmitScalarExpr(BaseExpr);
    ObjectTy = BaseExpr->getType()->getPointeeType();
    BaseQuals = ObjectTy.getQualifiers();
  } else {
    LValue BaseLV = EmitLValue(BaseExpr);
    // FIXME: this isn't right for bitfields.
    BaseValue = BaseLV.getAddress();
    ObjectTy = BaseExpr->getType();
    BaseQuals = ObjectTy.getQualifiers();
  }

  LValue LV =
    EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
                      BaseQuals.getCVRQualifiers());
  setObjCGCLValueClass(getContext(), E, LV);
  return LV;
}

LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
  // We can only get an l-value for a statement expression that returns an
  // aggregate type.
  RValue RV = EmitAnyExprToTemp(E);
  return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
}
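
// For example, a GNU statement expression of aggregate type, e.g.
//   ({ struct S s = {0}; s; })
// is emitted into a temporary, and the temporary's address serves as the
// l-value; that is why only the aggregate case is handled here.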

RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 CallExpr::const_arg_iterator ArgBeg,
                                 CallExpr::const_arg_iterator ArgEnd,
                                 const Decl *TargetDecl) {
  // Get the actual function type. The callee type will always be a pointer to
  // function type or a block pointer type.
  assert(CalleeType->isFunctionPointerType() &&
         "Call must have function pointer type!");

  CalleeType = getContext().getCanonicalType(CalleeType);

  const FunctionType *FnType
    = cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType());

  CallArgList Args;
  EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd);

  const CGFunctionInfo &FnInfo =
    CGM.getTypes().arrangeFreeFunctionCall(Args, FnType);

  // C99 6.5.2.2p6:
  //   If the expression that denotes the called function has a type
  //   that does not include a prototype, [the default argument
  //   promotions are performed]. If the number of arguments does not
  //   equal the number of parameters, the behavior is undefined. If
  //   the function is defined with a type that includes a prototype,
  //   and either the prototype ends with an ellipsis (, ...) or the
  //   types of the arguments after promotion are not compatible with
  //   the types of the parameters, the behavior is undefined. If the
  //   function is defined with a type that does not include a
  //   prototype, and the types of the arguments after promotion are
  //   not compatible with those of the parameters after promotion,
  //   the behavior is undefined [except in some trivial cases].
  // That is, in the general case, we should assume that a call
  // through an unprototyped function type works like a *non-variadic*
  // call.  The way we make this work is to cast to the exact type
  // of the promoted arguments.
  if (isa<FunctionNoProtoType>(FnType)) {
    llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
    CalleeTy = CalleeTy->getPointerTo();
    Callee = Builder.CreateBitCast(Callee, CalleeTy, "callee.knr.cast");
  }

  return EmitCall(FnInfo, Callee, ReturnValue, Args, TargetDecl);
}
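
// For example, given the unprototyped declaration 'int f();', a call
// 'f(1, 2.0)' applies the default argument promotions and then bitcasts the
// callee (the "callee.knr.cast" above) to the exact non-variadic type of the
// promoted arguments, e.g. 'i32 (i32, double)*'.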

LValue CodeGenFunction::
EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
  llvm::Value *BaseV;
  if (E->getOpcode() == BO_PtrMemI)
    BaseV = EmitScalarExpr(E->getLHS());
  else
    BaseV = EmitLValue(E->getLHS()).getAddress();

  llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());

  const MemberPointerType *MPT
    = E->getRHS()->getType()->getAs<MemberPointerType>();

  llvm::Value *AddV =
    CGM.getCXXABI().EmitMemberDataPointerAddress(*this, BaseV, OffsetV, MPT);

  return MakeAddrLValue(AddV, MPT->getPointeeType());
}
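
// For example, given
//   struct S { int a; int b; };
//   int S::*pm = &S::b;
// the expression 's.*pm = 3' emits 's' as an address and 'pm' as a scalar
// value, then asks the C++ ABI to form the member's address (a byte-offset
// GEP under the Itanium ABI, where a data member pointer is an offset).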

static void
EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
             llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
             uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    // Note that cmpxchg only supports specifying one ordering and
    // doesn't support weak cmpxchg, at least at the moment.
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::LoadInst *LoadVal2 = CGF.Builder.CreateLoad(Val2);
    LoadVal2->setAlignment(Align);
    llvm::AtomicCmpXchgInst *CXI =
        CGF.Builder.CreateAtomicCmpXchg(Ptr, LoadVal1, LoadVal2, Order);
    CXI->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreVal1 = CGF.Builder.CreateStore(CXI, Val1);
    StoreVal1->setAlignment(Align);
    llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(CXI, LoadVal1);
    CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
    return;
  }

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    Load->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}
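
// For example, '__atomic_add_fetch(&i, 1, __ATOMIC_SEQ_CST)' becomes an
// 'atomicrmw add' (which yields the old value) followed by a second IR add
// (the PostOp above) to recompute the new value the builtin returns;
// '__atomic_fetch_add' emits the atomicrmw alone. The nand case also needs
// the trailing 'not', since LLVM's Nand computes ~(old & val).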

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static llvm::Value *
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

static RValue ConvertTempToRValue(CodeGenFunction &CGF, QualType Ty,
                                  llvm::Value *Dest) {
  if (Ty->isAnyComplexType())
    return RValue::getComplex(CGF.LoadComplexFromAddr(Dest, false));
  if (CGF.hasAggregateLLVMType(Ty))
    return RValue::getAggregate(Dest);
  return RValue::get(CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(Dest, Ty)));
}

RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits =
    getContext().getTargetInfo().getMaxAtomicInlineWidth();
  bool UseLibcall = (Size != Align ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);
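
  // For instance, a type whose size and alignment differ (e.g. a 6-byte
  // packed struct with 2-byte alignment) or one wider than the target's
  // maximum inline atomic width takes the libcall path below; naturally
  // sized and aligned types are lowered to LLVM atomic instructions instead.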

  llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
  Ptr = EmitScalarExpr(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    if (!hasAggregateLLVMType(E->getVal1()->getType())) {
      QualType PointeeType
        = E->getPtr()->getType()->getAs<PointerType>()->getPointeeType();
      EmitScalarInit(EmitScalarExpr(E->getVal1()),
                     LValue::MakeAddr(Ptr, PointeeType, alignChars,
                                      getContext()));
    } else if (E->getType()->isAnyComplexType()) {
      EmitComplexExprIntoAddr(E->getVal1(), Ptr, E->isVolatile());
    } else {
      AggValueSlot Slot = AggValueSlot::forAddr(Ptr, alignChars,
                                        AtomicTy.getQualifiers(),
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased);
      EmitAggExpr(E->getVal1(), Slot);
    }
    return RValue::get(0);
  }

  Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    // Evaluate and discard the 'weak' argument.
    if (E->getNumSubExprs() == 6)
      EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }
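
  // To illustrate the pointer-arithmetic special case above: for
  //   _Atomic(int *) p;  __c11_atomic_fetch_add(&p, 2, __ATOMIC_SEQ_CST);
  // the addend is scaled to '2 * sizeof(int)' before the atomic operation,
  // whereas the GNU '__atomic_fetch_add' treats its operand as a raw byte
  // count and leaves any sizeof scaling to the user.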

  if (!E->getType()->isVoidType() && !Dest)
    Dest = CreateMemTemp(E->getType(), ".atomicdst");

  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {

    llvm::SmallVector<QualType, 5> Params;
    CallArgList Args;
    // Size is always the first parameter
    Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
             getContext().getSizeType());
    // Atomic address is always the second parameter
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)),
             getContext().VoidPtrTy);

    const char* LibCallName;
    QualType RetTy = getContext().VoidTy;
    switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is no
    // optimization benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
    //                                void *desired, int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
               getContext().VoidPtrTy);
      Args.add(RValue::get(EmitCastToVoidPtr(Val2)),
               getContext().VoidPtrTy);
      Args.add(RValue::get(Order),
               getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
               getContext().VoidPtrTy);
      Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
               getContext().VoidPtrTy);
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
               getContext().VoidPtrTy);
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
               getContext().VoidPtrTy);
      break;
#if 0
    // These are only defined for 1-16 byte integers.  It is not clear what
    // their semantics would be on anything else...
    case AtomicExpr::Add:   LibCallName = "__atomic_fetch_add_generic"; break;
    case AtomicExpr::Sub:   LibCallName = "__atomic_fetch_sub_generic"; break;
    case AtomicExpr::And:   LibCallName = "__atomic_fetch_and_generic"; break;
    case AtomicExpr::Or:    LibCallName = "__atomic_fetch_or_generic"; break;
    case AtomicExpr::Xor:   LibCallName = "__atomic_fetch_xor_generic"; break;
#endif
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }
    // order is always the last parameter
    Args.add(RValue::get(Order),
             getContext().IntTy);

    const CGFunctionInfo &FuncInfo =
        CGM.getTypes().arrangeFreeFunctionCall(RetTy, Args,
            FunctionType::ExtInfo(), RequiredArgs::All);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
    if (E->isCmpXChg())
      return Res;
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return ConvertTempToRValue(*this, E->getType(), Dest);
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

  llvm::Type *IPtrTy =
      llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
  llvm::Value *OrigDest = Dest;
  Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
  if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
  if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
  if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);

  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case 0:  // memory_order_relaxed
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Monotonic);
      break;
    case 1:  // memory_order_consume
    case 2:  // memory_order_acquire
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Acquire);
      break;
    case 3:  // memory_order_release
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Release);
      break;
    case 4:  // memory_order_acq_rel
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::AcquireRelease);
      break;
    case 5:  // memory_order_seq_cst
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return ConvertTempToRValue(*this, E->getType(), OrigDest);
  }

  // Long case, when Order isn't obviously constant.

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0,
                   *AcqRelBB = 0, *SeqCstBB = 0;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
               llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(1), AcquireBB);
    SI->addCase(Builder.getInt32(2), AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(3), ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(4), AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
               llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(5), SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (E->getType()->isVoidType())
    return RValue::get(0);
  return ConvertTempToRValue(*this, E->getType(), OrigDest);
}
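
// When the ordering is not a compile-time constant, e.g.
//   void store(_Atomic(int) *p, int v, int mo) {
//     __c11_atomic_store(p, v, mo);
//   }
// the code above emits a switch over 'mo' with one basic block per ordering
// that is legal for the operation (no acquire blocks for a store, no release
// blocks for a load), all rejoining at 'atomic.continue'.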

void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
  assert(Val->getType()->isFPOrFPVectorTy());
  if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
    return;

  llvm::MDBuilder MDHelper(getLLVMContext());
  llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);

  cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
}
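
// For example, an accuracy of 2.5 ULPs attaches metadata of the form
//   %div = fdiv float %x, %y, !fpmath !0
//   !0 = metadata !{float 2.500000e+00}
// giving the backend permission to use a faster, less precise lowering (as
// OpenCL's relaxed division permits).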

namespace {
  struct LValueOrRValue {
    LValue LV;
    RValue RV;
  };
}

static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
                                           const PseudoObjectExpr *E,
                                           bool forLValue,
                                           AggValueSlot slot) {
  llvm::SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;

  // Find the result expression, if any.
  const Expr *resultExpr = E->getResultExpr();
  LValueOrRValue result;

  for (PseudoObjectExpr::const_semantics_iterator
         i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
    const Expr *semantic = *i;

    // If this semantic expression is an opaque value, bind it
    // to the result of its source expression.
    if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) {

      // If this is the result expression, we may need to evaluate
      // directly into the slot.
      typedef CodeGenFunction::OpaqueValueMappingData OVMA;
      OVMA opaqueData;
      if (ov == resultExpr && ov->isRValue() && !forLValue &&
          CodeGenFunction::hasAggregateLLVMType(ov->getType()) &&
          !ov->getType()->isAnyComplexType()) {
        CGF.EmitAggExpr(ov->getSourceExpr(), slot);

        LValue LV = CGF.MakeAddrLValue(slot.getAddr(), ov->getType());
        opaqueData = OVMA::bind(CGF, ov, LV);
        result.RV = slot.asRValue();

      // Otherwise, emit as normal.
      } else {
        opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());

        // If this is the result, also evaluate the result now.
        if (ov == resultExpr) {
          if (forLValue)
            result.LV = CGF.EmitLValue(ov);
          else
            result.RV = CGF.EmitAnyExpr(ov, slot);
        }
      }

      opaques.push_back(opaqueData);

    // Otherwise, if the expression is the result, evaluate it
    // and remember the result.
    } else if (semantic == resultExpr) {
      if (forLValue)
        result.LV = CGF.EmitLValue(semantic);
      else
        result.RV = CGF.EmitAnyExpr(semantic, slot);

    // Otherwise, evaluate the expression in an ignored context.
    } else {
      CGF.EmitIgnoredExpr(semantic);
    }
  }

  // Unbind all the opaques now.
  for (unsigned i = 0, e = opaques.size(); i != e; ++i)
    opaques[i].unbind(CGF);

  return result;
}
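
// For example, an Objective-C property compound assignment such as
//   obj.count += 1;
// is a PseudoObjectExpr whose semantic form is, roughly: an OpaqueValueExpr
// for 'obj', a getter message send, the addition, and a setter message send.
// Binding each OpaqueValueExpr to its source expression above ensures 'obj'
// is evaluated exactly once; the result expression, if any, supplies the
// overall value.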

RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
                                               AggValueSlot slot) {
  return emitPseudoObjectExpr(*this, E, false, slot).RV;
}

LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
  return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
}