CGExprScalar.cpp revision 62a11a78c2616ce1fa3ef7b1a4bc3f42365cafb9
1//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
11//
12//===----------------------------------------------------------------------===//
13
14#include "CodeGenFunction.h"
15#include "CodeGenModule.h"
16#include "clang/AST/ASTContext.h"
17#include "clang/AST/DeclObjC.h"
18#include "clang/AST/StmtVisitor.h"
19#include "clang/Basic/TargetInfo.h"
20#include "llvm/Constants.h"
21#include "llvm/Function.h"
22#include "llvm/GlobalVariable.h"
23#include "llvm/Intrinsics.h"
24#include "llvm/Module.h"
25#include "llvm/Support/Compiler.h"
26#include "llvm/Support/CFG.h"
27#include "llvm/Target/TargetData.h"
28#include <cstdarg>
29
30using namespace clang;
31using namespace CodeGen;
32using llvm::Value;
33
34//===----------------------------------------------------------------------===//
35//                         Scalar Expression Emitter
36//===----------------------------------------------------------------------===//
37
38struct BinOpInfo {
39  Value *LHS;
40  Value *RHS;
41  QualType Ty;  // Computation Type.
42  const BinaryOperator *E;
43};
44
45namespace {
46class VISIBILITY_HIDDEN ScalarExprEmitter
47  : public StmtVisitor<ScalarExprEmitter, Value*> {
48  CodeGenFunction &CGF;
49  CGBuilderTy &Builder;
50  bool IgnoreResultAssign;
51  llvm::LLVMContext &VMContext;
52public:
53
54  ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
55    : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
56      VMContext(cgf.getLLVMContext()) {
57  }
58
59  //===--------------------------------------------------------------------===//
60  //                               Utilities
61  //===--------------------------------------------------------------------===//
62
63  bool TestAndClearIgnoreResultAssign() {
64    bool I = IgnoreResultAssign;
65    IgnoreResultAssign = false;
66    return I;
67  }
68
69  const llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
70  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
71
72  Value *EmitLoadOfLValue(LValue LV, QualType T) {
73    return CGF.EmitLoadOfLValue(LV, T).getScalarVal();
74  }
75
76  /// EmitLoadOfLValue - Given an expression with scalar type that is an
77  /// l-value, this method emits the address of the l-value, then loads and
78  /// returns the result.
79  Value *EmitLoadOfLValue(const Expr *E) {
80    return EmitLoadOfLValue(EmitLValue(E), E->getType());
81  }
82
83  /// EmitConversionToBool - Convert the specified expression value to a
84  /// boolean (i1) truth value.  This is equivalent to "Val != 0".
85  Value *EmitConversionToBool(Value *Src, QualType DstTy);
86
87  /// EmitScalarConversion - Emit a conversion from the specified type to the
88  /// specified destination type, both of which are LLVM scalar types.
89  Value *EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy);
90
91  /// EmitComplexToScalarConversion - Emit a conversion from the specified
92  /// complex type to the specified destination type, where the destination
93  /// type is an LLVM scalar type.
94  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
95                                       QualType SrcTy, QualType DstTy);
96
97  //===--------------------------------------------------------------------===//
98  //                            Visitor Methods
99  //===--------------------------------------------------------------------===//
100
101  Value *VisitStmt(Stmt *S) {
102    S->dump(CGF.getContext().getSourceManager());
103    assert(0 && "Stmt can't have complex result type!");
104    return 0;
105  }
106  Value *VisitExpr(Expr *S);
107  Value *VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr()); }
108
109  // Leaves.
110  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
111    return VMContext.getConstantInt(E->getValue());
112  }
113  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
114    return VMContext.getConstantFP(E->getValue());
115  }
116  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
117    return VMContext.getConstantInt(ConvertType(E->getType()), E->getValue());
118  }
119  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
120    return VMContext.getConstantInt(ConvertType(E->getType()), E->getValue());
121  }
122  Value *VisitCXXZeroInitValueExpr(const CXXZeroInitValueExpr *E) {
123    return VMContext.getNullValue(ConvertType(E->getType()));
124  }
125  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
126    return VMContext.getNullValue(ConvertType(E->getType()));
127  }
128  Value *VisitTypesCompatibleExpr(const TypesCompatibleExpr *E) {
129    return VMContext.getConstantInt(ConvertType(E->getType()),
130                                  CGF.getContext().typesAreCompatible(
131                                    E->getArgType1(), E->getArgType2()));
132  }
133  Value *VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E);
134  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
135    llvm::Value *V =
136      VMContext.getConstantInt(llvm::Type::Int32Ty,
137                             CGF.GetIDForAddrOfLabel(E->getLabel()));
138
139    return Builder.CreateIntToPtr(V, ConvertType(E->getType()));
140  }
141
142  // l-values.
143  Value *VisitDeclRefExpr(DeclRefExpr *E) {
144    if (const EnumConstantDecl *EC = dyn_cast<EnumConstantDecl>(E->getDecl()))
145      return VMContext.getConstantInt(EC->getInitVal());
146    return EmitLoadOfLValue(E);
147  }
148  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
149    return CGF.EmitObjCSelectorExpr(E);
150  }
151  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
152    return CGF.EmitObjCProtocolExpr(E);
153  }
154  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
155    return EmitLoadOfLValue(E);
156  }
157  Value *VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
158    return EmitLoadOfLValue(E);
159  }
160  Value *VisitObjCKVCRefExpr(ObjCKVCRefExpr *E) {
161    return EmitLoadOfLValue(E);
162  }
163  Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
164    return CGF.EmitObjCMessageExpr(E).getScalarVal();
165  }
166
167  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
168  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
169  Value *VisitMemberExpr(Expr *E)           { return EmitLoadOfLValue(E); }
170  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
171  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
172    return EmitLoadOfLValue(E);
173  }
174  Value *VisitStringLiteral(Expr *E)  { return EmitLValue(E).getAddress(); }
175  Value *VisitObjCEncodeExpr(const ObjCEncodeExpr *E) {
176     return EmitLValue(E).getAddress();
177  }
178
179  Value *VisitPredefinedExpr(Expr *E) { return EmitLValue(E).getAddress(); }
180
181  Value *VisitInitListExpr(InitListExpr *E) {
182    bool Ignore = TestAndClearIgnoreResultAssign();
183    (void)Ignore;
184    assert (Ignore == false && "init list ignored");
185    unsigned NumInitElements = E->getNumInits();
186
187    if (E->hadArrayRangeDesignator()) {
188      CGF.ErrorUnsupported(E, "GNU array range designator extension");
189    }
190
191    const llvm::VectorType *VType =
192      dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
193
194    // We have a scalar in braces. Just use the first element.
195    if (!VType)
196      return Visit(E->getInit(0));
197
198    unsigned NumVectorElements = VType->getNumElements();
199    const llvm::Type *ElementType = VType->getElementType();
200
201    // Emit individual vector element stores.
202    llvm::Value *V = VMContext.getUndef(VType);
203
204    // Emit initializers
205    unsigned i;
206    for (i = 0; i < NumInitElements; ++i) {
207      Value *NewV = Visit(E->getInit(i));
208      Value *Idx = VMContext.getConstantInt(llvm::Type::Int32Ty, i);
209      V = Builder.CreateInsertElement(V, NewV, Idx);
210    }
211
212    // Emit remaining default initializers
213    for (/* Do not initialize i*/; i < NumVectorElements; ++i) {
214      Value *Idx = VMContext.getConstantInt(llvm::Type::Int32Ty, i);
215      llvm::Value *NewV = VMContext.getNullValue(ElementType);
216      V = Builder.CreateInsertElement(V, NewV, Idx);
217    }
218
219    return V;
220  }
221
222  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
223    return VMContext.getNullValue(ConvertType(E->getType()));
224  }
225  Value *VisitImplicitCastExpr(const ImplicitCastExpr *E);
226  Value *VisitCastExpr(const CastExpr *E) {
227    // Make sure to evaluate VLA bounds now so that we have them for later.
228    if (E->getType()->isVariablyModifiedType())
229      CGF.EmitVLASize(E->getType());
230
231    return EmitCastExpr(E->getSubExpr(), E->getType());
232  }
233  Value *EmitCastExpr(const Expr *E, QualType T);
234
235  Value *VisitCallExpr(const CallExpr *E) {
236    if (E->getCallReturnType()->isReferenceType())
237      return EmitLoadOfLValue(E);
238
239    return CGF.EmitCallExpr(E).getScalarVal();
240  }
241
242  Value *VisitStmtExpr(const StmtExpr *E);
243
244  Value *VisitBlockDeclRefExpr(const BlockDeclRefExpr *E);
245
246  // Unary Operators.
247  Value *VisitPrePostIncDec(const UnaryOperator *E, bool isInc, bool isPre);
248  Value *VisitUnaryPostDec(const UnaryOperator *E) {
249    return VisitPrePostIncDec(E, false, false);
250  }
251  Value *VisitUnaryPostInc(const UnaryOperator *E) {
252    return VisitPrePostIncDec(E, true, false);
253  }
254  Value *VisitUnaryPreDec(const UnaryOperator *E) {
255    return VisitPrePostIncDec(E, false, true);
256  }
257  Value *VisitUnaryPreInc(const UnaryOperator *E) {
258    return VisitPrePostIncDec(E, true, true);
259  }
260  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
261    return EmitLValue(E->getSubExpr()).getAddress();
262  }
263  Value *VisitUnaryDeref(const Expr *E) { return EmitLoadOfLValue(E); }
264  Value *VisitUnaryPlus(const UnaryOperator *E) {
265    // This differs from gcc, though, most likely due to a bug in gcc.
266    TestAndClearIgnoreResultAssign();
267    return Visit(E->getSubExpr());
268  }
269  Value *VisitUnaryMinus    (const UnaryOperator *E);
270  Value *VisitUnaryNot      (const UnaryOperator *E);
271  Value *VisitUnaryLNot     (const UnaryOperator *E);
272  Value *VisitUnaryReal     (const UnaryOperator *E);
273  Value *VisitUnaryImag     (const UnaryOperator *E);
274  Value *VisitUnaryExtension(const UnaryOperator *E) {
275    return Visit(E->getSubExpr());
276  }
277  Value *VisitUnaryOffsetOf(const UnaryOperator *E);
278
279  // C++
280  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
281    return Visit(DAE->getExpr());
282  }
283  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
284    return CGF.LoadCXXThis();
285  }
286
287  Value *VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
288    return CGF.EmitCXXExprWithTemporaries(E).getScalarVal();
289  }
290  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
291    return CGF.EmitCXXNewExpr(E);
292  }
293
294  // Binary Operators.
295  Value *EmitMul(const BinOpInfo &Ops) {
296    if (CGF.getContext().getLangOptions().OverflowChecking
297        && Ops.Ty->isSignedIntegerType())
298      return EmitOverflowCheckedBinOp(Ops);
299    if (Ops.LHS->getType()->isFPOrFPVector())
300      return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
301    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
302  }
303  /// Create a binary op that checks for overflow.
304  /// Currently only supports +, - and *.
305  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
306  Value *EmitDiv(const BinOpInfo &Ops);
307  Value *EmitRem(const BinOpInfo &Ops);
308  Value *EmitAdd(const BinOpInfo &Ops);
309  Value *EmitSub(const BinOpInfo &Ops);
310  Value *EmitShl(const BinOpInfo &Ops);
311  Value *EmitShr(const BinOpInfo &Ops);
312  Value *EmitAnd(const BinOpInfo &Ops) {
313    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
314  }
315  Value *EmitXor(const BinOpInfo &Ops) {
316    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
317  }
318  Value *EmitOr (const BinOpInfo &Ops) {
319    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
320  }
321
322  BinOpInfo EmitBinOps(const BinaryOperator *E);
323  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
324                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
325
326  // Binary operators and binary compound assignment operators.
327#define HANDLEBINOP(OP) \
328  Value *VisitBin ## OP(const BinaryOperator *E) {                         \
329    return Emit ## OP(EmitBinOps(E));                                      \
330  }                                                                        \
331  Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) {       \
332    return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP);          \
333  }
334  HANDLEBINOP(Mul);
335  HANDLEBINOP(Div);
336  HANDLEBINOP(Rem);
337  HANDLEBINOP(Add);
338  HANDLEBINOP(Sub);
339  HANDLEBINOP(Shl);
340  HANDLEBINOP(Shr);
341  HANDLEBINOP(And);
342  HANDLEBINOP(Xor);
343  HANDLEBINOP(Or);
344#undef HANDLEBINOP
345
346  // Comparisons.
347  Value *EmitCompare(const BinaryOperator *E, unsigned UICmpOpc,
348                     unsigned SICmpOpc, unsigned FCmpOpc);
349#define VISITCOMP(CODE, UI, SI, FP) \
350    Value *VisitBin##CODE(const BinaryOperator *E) { \
351      return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
352                         llvm::FCmpInst::FP); }
353  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT);
354  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT);
355  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE);
356  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE);
357  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ);
358  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE);
359#undef VISITCOMP
360
361  Value *VisitBinAssign     (const BinaryOperator *E);
362
363  Value *VisitBinLAnd       (const BinaryOperator *E);
364  Value *VisitBinLOr        (const BinaryOperator *E);
365  Value *VisitBinComma      (const BinaryOperator *E);
366
367  // Other Operators.
368  Value *VisitBlockExpr(const BlockExpr *BE);
369  Value *VisitConditionalOperator(const ConditionalOperator *CO);
370  Value *VisitChooseExpr(ChooseExpr *CE);
371  Value *VisitVAArgExpr(VAArgExpr *VE);
372  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
373    return CGF.EmitObjCStringLiteral(E);
374  }
375};
376}  // end anonymous namespace.
377
378//===----------------------------------------------------------------------===//
379//                                Utilities
380//===----------------------------------------------------------------------===//
381
382/// EmitConversionToBool - Convert the specified expression value to a
383/// boolean (i1) truth value.  This is equivalent to "Val != 0".
384Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
385  assert(SrcType->isCanonical() && "EmitScalarConversion strips typedefs");
386
387  if (SrcType->isRealFloatingType()) {
388    // Compare against 0.0 for fp scalars.
389    llvm::Value *Zero = VMContext.getNullValue(Src->getType());
390    return Builder.CreateFCmpUNE(Src, Zero, "tobool");
391  }
392
393  assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
394         "Unknown scalar type to convert");
395
396  // Because of the type rules of C, we often end up computing a logical value,
397  // then zero extending it to int, then wanting it as a logical value again.
398  // Optimize this common case.
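  // For example, the value of "a && b" is materialized as an i1 that is then
  // zero extended to int (see VisitBinLAnd); using that value as a condition
  // converts it straight back to i1 here, so we can just drop the zext.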
399  if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(Src)) {
400    if (ZI->getOperand(0)->getType() == llvm::Type::Int1Ty) {
401      Value *Result = ZI->getOperand(0);
402      // If there aren't any more uses, zap the instruction to save space.
403      // Note that there can be more uses, for example if this
404      // is the result of an assignment.
405      if (ZI->use_empty())
406        ZI->eraseFromParent();
407      return Result;
408    }
409  }
410
411  // Compare against an integer or pointer null.
412  llvm::Value *Zero = VMContext.getNullValue(Src->getType());
413  return Builder.CreateICmpNE(Src, Zero, "tobool");
414}
415
416/// EmitScalarConversion - Emit a conversion from the specified type to the
417/// specified destination type, both of which are LLVM scalar types.
418Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
419                                               QualType DstType) {
420  SrcType = CGF.getContext().getCanonicalType(SrcType);
421  DstType = CGF.getContext().getCanonicalType(DstType);
422  if (SrcType == DstType) return Src;
423
424  if (DstType->isVoidType()) return 0;
425
426  // Handle conversions to bool first; they are special: comparisons against 0.
427  if (DstType->isBooleanType())
428    return EmitConversionToBool(Src, SrcType);
429
430  const llvm::Type *DstTy = ConvertType(DstType);
431
432  // Ignore conversions like int -> uint.
433  if (Src->getType() == DstTy)
434    return Src;
435
436  // Handle pointer conversions next: pointers can only be converted
437  // to/from other pointers and integers. Check for pointer types in
438  // terms of LLVM, as some native types (like Obj-C id) may map to a
439  // pointer type.
440  if (isa<llvm::PointerType>(DstTy)) {
441    // The source value may be an integer, or a pointer.
442    if (isa<llvm::PointerType>(Src->getType()))
443      return Builder.CreateBitCast(Src, DstTy, "conv");
444    assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
445    // First, convert to the correct width so that we control the kind of
446    // extension.
447    const llvm::Type *MiddleTy = VMContext.getIntegerType(CGF.LLVMPointerWidth);
448    bool InputSigned = SrcType->isSignedIntegerType();
449    llvm::Value* IntResult =
450        Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
451    // Then, cast to pointer.
452    return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
453  }
454
455  if (isa<llvm::PointerType>(Src->getType())) {
456    // Must be a ptr-to-int cast.
457    assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
458    return Builder.CreatePtrToInt(Src, DstTy, "conv");
459  }
460
461  // A scalar can be splatted to an extended vector of the same element type
462  if (DstType->isExtVectorType() && !isa<VectorType>(SrcType)) {
463    // Cast the scalar to element type
464    QualType EltTy = DstType->getAsExtVectorType()->getElementType();
465    llvm::Value *Elt = EmitScalarConversion(Src, SrcType, EltTy);
466
467    // Insert the element in element zero of an undef vector
468    llvm::Value *UnV = VMContext.getUndef(DstTy);
469    llvm::Value *Idx = VMContext.getConstantInt(llvm::Type::Int32Ty, 0);
470    UnV = Builder.CreateInsertElement(UnV, Elt, Idx, "tmp");
471
472    // Splat the element across to all elements
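    // (A shufflevector whose mask is all zeros, e.g. <0, 0, 0, 0> for a
    // 4-element destination, copies element 0 of the source into every lane.)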
473    llvm::SmallVector<llvm::Constant*, 16> Args;
474    unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
475    for (unsigned i = 0; i < NumElements; i++)
476      Args.push_back(VMContext.getConstantInt(llvm::Type::Int32Ty, 0));
477
478    llvm::Constant *Mask = VMContext.getConstantVector(&Args[0], NumElements);
479    llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
480    return Yay;
481  }
482
483  // Allow bitcast from vector to integer/fp of the same size.
484  if (isa<llvm::VectorType>(Src->getType()) ||
485      isa<llvm::VectorType>(DstTy))
486    return Builder.CreateBitCast(Src, DstTy, "conv");
487
488  // Finally, we have the arithmetic types: real int/float.
489  if (isa<llvm::IntegerType>(Src->getType())) {
490    bool InputSigned = SrcType->isSignedIntegerType();
491    if (isa<llvm::IntegerType>(DstTy))
492      return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
493    else if (InputSigned)
494      return Builder.CreateSIToFP(Src, DstTy, "conv");
495    else
496      return Builder.CreateUIToFP(Src, DstTy, "conv");
497  }
498
499  assert(Src->getType()->isFloatingPoint() && "Unknown real conversion");
500  if (isa<llvm::IntegerType>(DstTy)) {
501    if (DstType->isSignedIntegerType())
502      return Builder.CreateFPToSI(Src, DstTy, "conv");
503    else
504      return Builder.CreateFPToUI(Src, DstTy, "conv");
505  }
506
507  assert(DstTy->isFloatingPoint() && "Unknown real conversion");
508  if (DstTy->getTypeID() < Src->getType()->getTypeID())
509    return Builder.CreateFPTrunc(Src, DstTy, "conv");
510  else
511    return Builder.CreateFPExt(Src, DstTy, "conv");
512}
513
514/// EmitComplexToScalarConversion - Emit a conversion from the specified
515/// complex type to the specified destination type, where the destination
516/// type is an LLVM scalar type.
517Value *ScalarExprEmitter::
518EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
519                              QualType SrcTy, QualType DstTy) {
520  // Get the source element type.
521  SrcTy = SrcTy->getAsComplexType()->getElementType();
522
523  // Handle conversions to bool first; they are special: comparisons against 0.
524  if (DstTy->isBooleanType()) {
525    //  Complex != 0  -> (Real != 0) | (Imag != 0)
526    Src.first  = EmitScalarConversion(Src.first, SrcTy, DstTy);
527    Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy);
528    return Builder.CreateOr(Src.first, Src.second, "tobool");
529  }
530
531  // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
532  // the imaginary part of the complex value is discarded and the value of the
533  // real part is converted according to the conversion rules for the
534  // corresponding real type."
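  // For example, converting a "_Complex double" to "double" simply takes the
  // real part.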
535  return EmitScalarConversion(Src.first, SrcTy, DstTy);
536}
537
538
539//===----------------------------------------------------------------------===//
540//                            Visitor Methods
541//===----------------------------------------------------------------------===//
542
543Value *ScalarExprEmitter::VisitExpr(Expr *E) {
544  CGF.ErrorUnsupported(E, "scalar expression");
545  if (E->getType()->isVoidType())
546    return 0;
547  return VMContext.getUndef(CGF.ConvertType(E->getType()));
548}
549
550Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
551  llvm::SmallVector<llvm::Constant*, 32> indices;
552  for (unsigned i = 2; i < E->getNumSubExprs(); i++) {
553    indices.push_back(cast<llvm::Constant>(CGF.EmitScalarExpr(E->getExpr(i))));
554  }
555  Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
556  Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
557  Value* SV = VMContext.getConstantVector(indices.begin(), indices.size());
558  return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
559}
560
561Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
562  TestAndClearIgnoreResultAssign();
563
564  // Emit subscript expressions in rvalue contexts.  For most cases, this just
565  // loads the lvalue formed by the subscript expr.  However, we have to be
566  // careful, because the base of a vector subscript is occasionally an rvalue,
567  // so we can't get it as an lvalue.
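  // For example, when the base is itself computed (such as the result of a
  // vector arithmetic expression), there is no storage whose address we could
  // take.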
568  if (!E->getBase()->getType()->isVectorType())
569    return EmitLoadOfLValue(E);
570
571  // Handle the vector case.  The base must be a vector, the index must be an
572  // integer value.
573  Value *Base = Visit(E->getBase());
574  Value *Idx  = Visit(E->getIdx());
575  bool IdxSigned = E->getIdx()->getType()->isSignedIntegerType();
576  Idx = Builder.CreateIntCast(Idx, llvm::Type::Int32Ty, IdxSigned,
577                              "vecidxcast");
578  return Builder.CreateExtractElement(Base, Idx, "vecext");
579}
580
581/// VisitImplicitCastExpr - Implicit casts are the same as normal casts, but
582/// also handle things like function to pointer-to-function decay, and array to
583/// pointer decay.
584Value *ScalarExprEmitter::VisitImplicitCastExpr(const ImplicitCastExpr *E) {
585  const Expr *Op = E->getSubExpr();
586
587  // If this is due to array->pointer conversion, emit the array expression as
588  // an l-value.
589  if (Op->getType()->isArrayType()) {
590    Value *V = EmitLValue(Op).getAddress();  // Bitfields can't be arrays.
591
592    // Note that VLA pointers are always decayed, so we don't need to do
593    // anything here.
594    if (!Op->getType()->isVariableArrayType()) {
595      assert(isa<llvm::PointerType>(V->getType()) && "Expected pointer");
596      assert(isa<llvm::ArrayType>(cast<llvm::PointerType>(V->getType())
597                                 ->getElementType()) &&
598             "Expected pointer to array");
599      V = Builder.CreateStructGEP(V, 0, "arraydecay");
600    }
601
602    // The resultant pointer type can be implicitly cast to other pointer
603    // types as well (e.g. void*) and can be implicitly converted to integer.
604    const llvm::Type *DestTy = ConvertType(E->getType());
605    if (V->getType() != DestTy) {
606      if (isa<llvm::PointerType>(DestTy))
607        V = Builder.CreateBitCast(V, DestTy, "ptrconv");
608      else {
609        assert(isa<llvm::IntegerType>(DestTy) && "Unknown array decay");
610        V = Builder.CreatePtrToInt(V, DestTy, "ptrconv");
611      }
612    }
613    return V;
614  }
615
616  return EmitCastExpr(Op, E->getType());
617}
618
619
620// EmitCastExpr - Emit code for an explicit or implicit cast.  Implicit casts
621// have to handle a broader range of conversions than explicit casts, as they
622// handle things like function to ptr-to-function decay etc.
623Value *ScalarExprEmitter::EmitCastExpr(const Expr *E, QualType DestTy) {
624  if (!DestTy->isVoidType())
625    TestAndClearIgnoreResultAssign();
626
627  // Handle cases where the source is a non-complex type.
628
629  if (!CGF.hasAggregateLLVMType(E->getType())) {
630    Value *Src = Visit(const_cast<Expr*>(E));
631
632    // Use EmitScalarConversion to perform the conversion.
633    return EmitScalarConversion(Src, E->getType(), DestTy);
634  }
635
636  if (E->getType()->isAnyComplexType()) {
637    // Handle cases where the source is a complex type.
638    bool IgnoreImag = true;
639    bool IgnoreImagAssign = true;
640    bool IgnoreReal = IgnoreResultAssign;
641    bool IgnoreRealAssign = IgnoreResultAssign;
642    if (DestTy->isBooleanType())
643      IgnoreImagAssign = IgnoreImag = false;
644    else if (DestTy->isVoidType()) {
645      IgnoreReal = IgnoreImag = false;
646      IgnoreRealAssign = IgnoreImagAssign = true;
647    }
648    CodeGenFunction::ComplexPairTy V
649      = CGF.EmitComplexExpr(E, IgnoreReal, IgnoreImag, IgnoreRealAssign,
650                            IgnoreImagAssign);
651    return EmitComplexToScalarConversion(V, E->getType(), DestTy);
652  }
653
654  // Okay, this is a cast from an aggregate.  It must be a cast to void.  Just
655  // evaluate the result and return.
656  CGF.EmitAggExpr(E, 0, false, true);
657  return 0;
658}
659
660Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
661  return CGF.EmitCompoundStmt(*E->getSubStmt(),
662                              !E->getType()->isVoidType()).getScalarVal();
663}
664
665Value *ScalarExprEmitter::VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
666  return Builder.CreateLoad(CGF.GetAddrOfBlockDecl(E), false, "tmp");
667}
668
669//===----------------------------------------------------------------------===//
670//                             Unary Operators
671//===----------------------------------------------------------------------===//
672
673Value *ScalarExprEmitter::VisitPrePostIncDec(const UnaryOperator *E,
674                                             bool isInc, bool isPre) {
675  LValue LV = EmitLValue(E->getSubExpr());
676  QualType ValTy = E->getSubExpr()->getType();
677  Value *InVal = CGF.EmitLoadOfLValue(LV, ValTy).getScalarVal();
678
679  int AmountVal = isInc ? 1 : -1;
680
681  if (ValTy->isPointerType() &&
682      ValTy->getAs<PointerType>()->isVariableArrayType()) {
683    // The amount of the addition/subtraction needs to account for the VLA size
684    CGF.ErrorUnsupported(E, "VLA pointer inc/dec");
685  }
686
687  Value *NextVal;
688  if (const llvm::PointerType *PT =
689         dyn_cast<llvm::PointerType>(InVal->getType())) {
690    llvm::Constant *Inc =
691      VMContext.getConstantInt(llvm::Type::Int32Ty, AmountVal);
692    if (!isa<llvm::FunctionType>(PT->getElementType())) {
693      QualType PTEE = ValTy->getPointeeType();
694      if (const ObjCInterfaceType *OIT =
695          dyn_cast<ObjCInterfaceType>(PTEE)) {
696        // Handle interface types, which are not represented with a concrete type.
697        int size = CGF.getContext().getTypeSize(OIT) / 8;
698        if (!isInc)
699          size = -size;
700        Inc = VMContext.getConstantInt(Inc->getType(), size);
701        const llvm::Type *i8Ty =
702          VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
703        InVal = Builder.CreateBitCast(InVal, i8Ty);
704        NextVal = Builder.CreateGEP(InVal, Inc, "add.ptr");
705        llvm::Value *lhs = LV.getAddress();
706        lhs = Builder.CreateBitCast(lhs, VMContext.getPointerTypeUnqual(i8Ty));
707        LV = LValue::MakeAddr(lhs, ValTy.getCVRQualifiers(),
708                              CGF.getContext().getObjCGCAttrKind(ValTy));
709      }
710      else
711        NextVal = Builder.CreateGEP(InVal, Inc, "ptrincdec");
712    } else {
713      const llvm::Type *i8Ty =
714        VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
715      NextVal = Builder.CreateBitCast(InVal, i8Ty, "tmp");
716      NextVal = Builder.CreateGEP(NextVal, Inc, "ptrincdec");
717      NextVal = Builder.CreateBitCast(NextVal, InVal->getType());
718    }
719  } else if (InVal->getType() == llvm::Type::Int1Ty && isInc) {
720    // Bool++ is an interesting case, due to promotion rules, we get:
721    // Bool++ -> Bool = Bool+1 -> Bool = (int)Bool+1 ->
722    // Bool = ((int)Bool+1) != 0
723    // An interesting aspect of this is that increment is always true.
724    // Decrement does not have this property.
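    // E.g. for "_Bool b = 1; b++;" b stays 1, whereas "b--" would flip a true
    // b to 0, which is why only the increment can be folded to a constant true
    // here.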
725    NextVal = VMContext.getConstantIntTrue();
726  } else if (isa<llvm::IntegerType>(InVal->getType())) {
727    NextVal = VMContext.getConstantInt(InVal->getType(), AmountVal);
728    NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec");
729  } else {
730    // Add the inc/dec to the real part.
731    if (InVal->getType() == llvm::Type::FloatTy)
732      NextVal =
733        VMContext.getConstantFP(llvm::APFloat(static_cast<float>(AmountVal)));
734    else if (InVal->getType() == llvm::Type::DoubleTy)
735      NextVal =
736        VMContext.getConstantFP(llvm::APFloat(static_cast<double>(AmountVal)));
737    else {
738      llvm::APFloat F(static_cast<float>(AmountVal));
739      bool ignored;
740      F.convert(CGF.Target.getLongDoubleFormat(), llvm::APFloat::rmTowardZero,
741                &ignored);
742      NextVal = VMContext.getConstantFP(F);
743    }
744    NextVal = Builder.CreateFAdd(InVal, NextVal, isInc ? "inc" : "dec");
745  }
746
747  // Store the updated result through the lvalue.
748  if (LV.isBitfield())
749    CGF.EmitStoreThroughBitfieldLValue(RValue::get(NextVal), LV, ValTy,
750                                       &NextVal);
751  else
752    CGF.EmitStoreThroughLValue(RValue::get(NextVal), LV, ValTy);
753
754  // If this is a postinc, return the value read from memory, otherwise use the
755  // updated value.
756  return isPre ? NextVal : InVal;
757}
758
759
760Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
761  TestAndClearIgnoreResultAssign();
762  Value *Op = Visit(E->getSubExpr());
763  if (Op->getType()->isFPOrFPVector())
764    return Builder.CreateFNeg(Op, "neg");
765  return Builder.CreateNeg(Op, "neg");
766}
767
768Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
769  TestAndClearIgnoreResultAssign();
770  Value *Op = Visit(E->getSubExpr());
771  return Builder.CreateNot(Op, "neg");
772}
773
774Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
775  // Compare operand to zero.
776  Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
777
778  // Invert value.
779  // TODO: Could dynamically modify easy computations here.  For example, if
780  // the operand is an icmp ne, turn into icmp eq.
781  BoolVal = Builder.CreateNot(BoolVal, "lnot");
782
783  // ZExt result to the expr type.
784  return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
785}
786
787/// VisitSizeOfAlignOfExpr - Return the size or alignment of the type of
788/// argument of the sizeof expression as an integer.
789Value *
790ScalarExprEmitter::VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E) {
791  QualType TypeToSize = E->getTypeOfArgument();
792  if (E->isSizeOf()) {
793    if (const VariableArrayType *VAT =
794          CGF.getContext().getAsVariableArrayType(TypeToSize)) {
795      if (E->isArgumentType()) {
796        // sizeof(type) - make sure to emit the VLA size.
797        CGF.EmitVLASize(TypeToSize);
798      } else {
799        // C99 6.5.3.4p2: If the argument is an expression of type
800        // VLA, it is evaluated.
801        CGF.EmitAnyExpr(E->getArgumentExpr());
802      }
803
804      return CGF.GetVLASize(VAT);
805    }
806  }
807
808  // If this isn't sizeof(vla), the result must be constant; use the
809  // constant folding logic so we don't have to duplicate it here.
810  Expr::EvalResult Result;
811  E->Evaluate(Result, CGF.getContext());
812  return VMContext.getConstantInt(Result.Val.getInt());
813}
814
815Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
816  Expr *Op = E->getSubExpr();
817  if (Op->getType()->isAnyComplexType())
818    return CGF.EmitComplexExpr(Op, false, true, false, true).first;
819  return Visit(Op);
820}
821Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
822  Expr *Op = E->getSubExpr();
823  if (Op->getType()->isAnyComplexType())
824    return CGF.EmitComplexExpr(Op, true, false, true, false).second;
825
826  // __imag on a scalar returns zero.  Emit the subexpr to ensure side
827  // effects are evaluated, but not the actual value.
828  if (E->isLvalue(CGF.getContext()) == Expr::LV_Valid)
829    CGF.EmitLValue(Op);
830  else
831    CGF.EmitScalarExpr(Op, true);
832  return VMContext.getNullValue(ConvertType(E->getType()));
833}
834
835Value *ScalarExprEmitter::VisitUnaryOffsetOf(const UnaryOperator *E)
836{
837  Value* ResultAsPtr = EmitLValue(E->getSubExpr()).getAddress();
838  const llvm::Type* ResultType = ConvertType(E->getType());
839  return Builder.CreatePtrToInt(ResultAsPtr, ResultType, "offsetof");
840}
841
842//===----------------------------------------------------------------------===//
843//                           Binary Operators
844//===----------------------------------------------------------------------===//
845
846BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
847  TestAndClearIgnoreResultAssign();
848  BinOpInfo Result;
849  Result.LHS = Visit(E->getLHS());
850  Result.RHS = Visit(E->getRHS());
851  Result.Ty  = E->getType();
852  Result.E = E;
853  return Result;
854}
855
856Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
857                      Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
858  bool Ignore = TestAndClearIgnoreResultAssign();
859  QualType LHSTy = E->getLHS()->getType(), RHSTy = E->getRHS()->getType();
860
861  BinOpInfo OpInfo;
862
863  if (E->getComputationResultType()->isAnyComplexType()) {
864    // This needs to go through the complex expression emitter, but
865    // it's a tad complicated to do that... I'm leaving it out for now.
866    // (Note that we do actually need the imaginary part of the RHS for
867    // multiplication and division.)
868    CGF.ErrorUnsupported(E, "complex compound assignment");
869    return VMContext.getUndef(CGF.ConvertType(E->getType()));
870  }
871
872  // Emit the RHS first.  __block variables need to have the rhs evaluated
873  // first, plus this should improve codegen a little.
874  OpInfo.RHS = Visit(E->getRHS());
875  OpInfo.Ty = E->getComputationResultType();
876  OpInfo.E = E;
877  // Load/convert the LHS.
878  LValue LHSLV = EmitLValue(E->getLHS());
879  OpInfo.LHS = EmitLoadOfLValue(LHSLV, LHSTy);
880  OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
881                                    E->getComputationLHSType());
882
883  // Expand the binary operator.
884  Value *Result = (this->*Func)(OpInfo);
885
886  // Convert the result back to the LHS type.
887  Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy);
888
889  // Store the result value into the LHS lvalue. Bit-fields are
890  // handled specially because the result is altered by the store,
891  // i.e., [C99 6.5.16p1] 'An assignment expression has the value of
892  // the left operand after the assignment...'.
893  if (LHSLV.isBitfield()) {
894    if (!LHSLV.isVolatileQualified()) {
895      CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy,
896                                         &Result);
897      return Result;
898    } else
899      CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy);
900  } else
901    CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV, LHSTy);
902  if (Ignore)
903    return 0;
904  return EmitLoadOfLValue(LHSLV, E->getType());
905}
906
907
908Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
909  if (Ops.LHS->getType()->isFPOrFPVector())
910    return Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
911  else if (Ops.Ty->isUnsignedIntegerType())
912    return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
913  else
914    return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
915}
916
917Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
918  // Rem in C can't be a floating point type: C99 6.5.5p2.
919  if (Ops.Ty->isUnsignedIntegerType())
920    return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
921  else
922    return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
923}
924
925Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
926  unsigned IID;
927  unsigned OpID = 0;
928
929  switch (Ops.E->getOpcode()) {
930  case BinaryOperator::Add:
931  case BinaryOperator::AddAssign:
932    OpID = 1;
933    IID = llvm::Intrinsic::sadd_with_overflow;
934    break;
935  case BinaryOperator::Sub:
936  case BinaryOperator::SubAssign:
937    OpID = 2;
938    IID = llvm::Intrinsic::ssub_with_overflow;
939    break;
940  case BinaryOperator::Mul:
941  case BinaryOperator::MulAssign:
942    OpID = 3;
943    IID = llvm::Intrinsic::smul_with_overflow;
944    break;
945  default:
946    assert(false && "Unsupported operation for overflow detection");
947    IID = 0;
948  }
949  OpID <<= 1;
950  OpID |= 1;
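  // The handler thus receives 3 for add, 5 for sub and 7 for mul: the original
  // OpID shifted left by one with the low bit set.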
951
952  const llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
953
954  llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, &opTy, 1);
955
956  Value *resultAndOverflow = Builder.CreateCall2(intrinsic, Ops.LHS, Ops.RHS);
957  Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
958  Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
959
960  // Branch in case of overflow.
961  llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
962  llvm::BasicBlock *overflowBB =
963    CGF.createBasicBlock("overflow", CGF.CurFn);
964  llvm::BasicBlock *continueBB =
965    CGF.createBasicBlock("overflow.continue", CGF.CurFn);
966
967  Builder.CreateCondBr(overflow, overflowBB, continueBB);
968
969  // Handle overflow
970
971  Builder.SetInsertPoint(overflowBB);
972
973  // Handler is:
974  // long long (*__overflow_handler)(long long a, long long b, char op,
975  //                                 char width)
976  std::vector<const llvm::Type*> handlerArgTypes;
977  handlerArgTypes.push_back(llvm::Type::Int64Ty);
978  handlerArgTypes.push_back(llvm::Type::Int64Ty);
979  handlerArgTypes.push_back(llvm::Type::Int8Ty);
980  handlerArgTypes.push_back(llvm::Type::Int8Ty);
981  llvm::FunctionType *handlerTy = VMContext.getFunctionType(llvm::Type::Int64Ty,
982      handlerArgTypes, false);
983  llvm::Value *handlerFunction =
984    CGF.CGM.getModule().getOrInsertGlobal("__overflow_handler",
985        VMContext.getPointerTypeUnqual(handlerTy));
986  handlerFunction = Builder.CreateLoad(handlerFunction);
987
988  llvm::Value *handlerResult = Builder.CreateCall4(handlerFunction,
989      Builder.CreateSExt(Ops.LHS, llvm::Type::Int64Ty),
990      Builder.CreateSExt(Ops.RHS, llvm::Type::Int64Ty),
991      VMContext.getConstantInt(llvm::Type::Int8Ty, OpID),
992      VMContext.getConstantInt(llvm::Type::Int8Ty,
993        cast<llvm::IntegerType>(opTy)->getBitWidth()));
994
995  handlerResult = Builder.CreateTrunc(handlerResult, opTy);
996
997  Builder.CreateBr(continueBB);
998
999  // Set up the continuation
1000  Builder.SetInsertPoint(continueBB);
1001  // Get the correct result
1002  llvm::PHINode *phi = Builder.CreatePHI(opTy);
1003  phi->reserveOperandSpace(2);
1004  phi->addIncoming(result, initialBB);
1005  phi->addIncoming(handlerResult, overflowBB);
1006
1007  return phi;
1008}
1009
1010Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
1011  if (!Ops.Ty->isAnyPointerType()) {
1012    if (CGF.getContext().getLangOptions().OverflowChecking &&
1013        Ops.Ty->isSignedIntegerType())
1014      return EmitOverflowCheckedBinOp(Ops);
1015
1016    if (Ops.LHS->getType()->isFPOrFPVector())
1017      return Builder.CreateFAdd(Ops.LHS, Ops.RHS, "add");
1018
1019    return Builder.CreateAdd(Ops.LHS, Ops.RHS, "add");
1020  }
1021
1022  if (Ops.Ty->isPointerType() &&
1023      Ops.Ty->getAs<PointerType>()->isVariableArrayType()) {
1024    // The amount of the addition needs to account for the VLA size
1025    CGF.ErrorUnsupported(Ops.E, "VLA pointer addition");
1026  }
1027  Value *Ptr, *Idx;
1028  Expr *IdxExp;
1029  const PointerType *PT = Ops.E->getLHS()->getType()->getAs<PointerType>();
1030  const ObjCObjectPointerType *OPT =
1031    Ops.E->getLHS()->getType()->getAsObjCObjectPointerType();
1032  if (PT || OPT) {
1033    Ptr = Ops.LHS;
1034    Idx = Ops.RHS;
1035    IdxExp = Ops.E->getRHS();
1036  } else {  // int + pointer
1037    PT = Ops.E->getRHS()->getType()->getAs<PointerType>();
1038    OPT = Ops.E->getRHS()->getType()->getAsObjCObjectPointerType();
1039    assert((PT || OPT) && "Invalid add expr");
1040    Ptr = Ops.RHS;
1041    Idx = Ops.LHS;
1042    IdxExp = Ops.E->getLHS();
1043  }
1044
1045  unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
1046  if (Width < CGF.LLVMPointerWidth) {
1047    // Zero or sign extend the index value based on whether the index is
1048    // signed or not.
1049    const llvm::Type *IdxType = VMContext.getIntegerType(CGF.LLVMPointerWidth);
1050    if (IdxExp->getType()->isSignedIntegerType())
1051      Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
1052    else
1053      Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext");
1054  }
1055  const QualType ElementType = PT ? PT->getPointeeType() : OPT->getPointeeType();
1056  // Handle interface types, which are not represented with a concrete
1057  // type.
1058  if (const ObjCInterfaceType *OIT = dyn_cast<ObjCInterfaceType>(ElementType)) {
1059    llvm::Value *InterfaceSize =
1060      VMContext.getConstantInt(Idx->getType(),
1061                             CGF.getContext().getTypeSize(OIT) / 8);
1062    Idx = Builder.CreateMul(Idx, InterfaceSize);
1063    const llvm::Type *i8Ty = VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
1064    Value *Casted = Builder.CreateBitCast(Ptr, i8Ty);
1065    Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr");
1066    return Builder.CreateBitCast(Res, Ptr->getType());
1067  }
1068
1069  // Explicitly handle GNU void* and function pointer arithmetic
1070  // extensions. The GNU void* casts amount to no-ops since our void*
1071  // type is i8*, but this is future proof.
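  // Under that extension, e.g. "void *p; p + 1" advances the pointer by a
  // single byte, which is exactly what the i8* GEP below produces.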
1072  if (ElementType->isVoidType() || ElementType->isFunctionType()) {
1073    const llvm::Type *i8Ty = VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
1074    Value *Casted = Builder.CreateBitCast(Ptr, i8Ty);
1075    Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr");
1076    return Builder.CreateBitCast(Res, Ptr->getType());
1077  }
1078
1079  return Builder.CreateGEP(Ptr, Idx, "add.ptr");
1080}
1081
1082Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
1083  if (!isa<llvm::PointerType>(Ops.LHS->getType())) {
1084    if (CGF.getContext().getLangOptions().OverflowChecking
1085        && Ops.Ty->isSignedIntegerType())
1086      return EmitOverflowCheckedBinOp(Ops);
1087
1088    if (Ops.LHS->getType()->isFPOrFPVector())
1089      return Builder.CreateFSub(Ops.LHS, Ops.RHS, "sub");
1090    return Builder.CreateSub(Ops.LHS, Ops.RHS, "sub");
1091  }
1092
1093  if (Ops.E->getLHS()->getType()->isPointerType() &&
1094      Ops.E->getLHS()->getType()->getAs<PointerType>()->isVariableArrayType()) {
1095    // The amount of the addition needs to account for the VLA size for
1096    // ptr-int;
1097    // the amount of the division needs to account for the VLA size for
1098    // ptr-ptr.
1099    CGF.ErrorUnsupported(Ops.E, "VLA pointer subtraction");
1100  }
1101
1102  const QualType LHSType = Ops.E->getLHS()->getType();
1103  const QualType LHSElementType = LHSType->getPointeeType();
1104  if (!isa<llvm::PointerType>(Ops.RHS->getType())) {
1105    // pointer - int
1106    Value *Idx = Ops.RHS;
1107    unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
1108    if (Width < CGF.LLVMPointerWidth) {
1109      // Zero or sign extend the index value based on whether the index is
1110      // signed or not.
1111      const llvm::Type *IdxType =
1112        VMContext.getIntegerType(CGF.LLVMPointerWidth);
1113      if (Ops.E->getRHS()->getType()->isSignedIntegerType())
1114        Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
1115      else
1116        Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext");
1117    }
1118    Idx = Builder.CreateNeg(Idx, "sub.ptr.neg");
1119
1120    // Handle interface types, which are not represented with a concrete
1121    // type.
1122    if (const ObjCInterfaceType *OIT =
1123        dyn_cast<ObjCInterfaceType>(LHSElementType)) {
1124      llvm::Value *InterfaceSize =
1125        VMContext.getConstantInt(Idx->getType(),
1126                               CGF.getContext().getTypeSize(OIT) / 8);
1127      Idx = Builder.CreateMul(Idx, InterfaceSize);
1128      const llvm::Type *i8Ty =
1129        VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
1130      Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty);
1131      Value *Res = Builder.CreateGEP(LHSCasted, Idx, "add.ptr");
1132      return Builder.CreateBitCast(Res, Ops.LHS->getType());
1133    }
1134
1135    // Explicitly handle GNU void* and function pointer arithmetic
1136    // extensions. The GNU void* casts amount to no-ops since our
1137    // void* type is i8*, but this is future proof.
1138    if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) {
1139      const llvm::Type *i8Ty =
1140        VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
1141      Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty);
1142      Value *Res = Builder.CreateGEP(LHSCasted, Idx, "sub.ptr");
1143      return Builder.CreateBitCast(Res, Ops.LHS->getType());
1144    }
1145
1146    return Builder.CreateGEP(Ops.LHS, Idx, "sub.ptr");
1147  } else {
1148    // pointer - pointer
1149    Value *LHS = Ops.LHS;
1150    Value *RHS = Ops.RHS;
1151
1152    uint64_t ElementSize;
1153
1154    // Handle GCC extension for pointer arithmetic on void* and function pointer
1155    // types.
1156    if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) {
1157      ElementSize = 1;
1158    } else {
1159      ElementSize = CGF.getContext().getTypeSize(LHSElementType) / 8;
1160    }
1161
1162    const llvm::Type *ResultType = ConvertType(Ops.Ty);
1163    LHS = Builder.CreatePtrToInt(LHS, ResultType, "sub.ptr.lhs.cast");
1164    RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
1165    Value *BytesBetween = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
1166
1167    // Optimize out the shift for element size of 1.
1168    if (ElementSize == 1)
1169      return BytesBetween;
1170
1171    // HACK: LLVM doesn't have a divide instruction that 'knows' there is no
1172    // remainder.  As such, we handle common power-of-two cases here to generate
1173    // better code. See PR2247.
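    // For instance, subtracting two "int*" values on a 32-bit target divides
    // the byte difference by 4, which becomes an "ashr ..., 2" here.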
1174    if (llvm::isPowerOf2_64(ElementSize)) {
1175      Value *ShAmt =
1176        VMContext.getConstantInt(ResultType, llvm::Log2_64(ElementSize));
1177      return Builder.CreateAShr(BytesBetween, ShAmt, "sub.ptr.shr");
1178    }
1179
1180    // Otherwise, do a full sdiv.
1181    Value *BytesPerElt = VMContext.getConstantInt(ResultType, ElementSize);
1182    return Builder.CreateSDiv(BytesBetween, BytesPerElt, "sub.ptr.div");
1183  }
1184}
1185
1186Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
1187  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
1188  // RHS to the same size as the LHS.
1189  Value *RHS = Ops.RHS;
1190  if (Ops.LHS->getType() != RHS->getType())
1191    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
1192
1193  return Builder.CreateShl(Ops.LHS, RHS, "shl");
1194}
1195
1196Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
1197  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
1198  // RHS to the same size as the LHS.
1199  Value *RHS = Ops.RHS;
1200  if (Ops.LHS->getType() != RHS->getType())
1201    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
1202
1203  if (Ops.Ty->isUnsignedIntegerType())
1204    return Builder.CreateLShr(Ops.LHS, RHS, "shr");
1205  return Builder.CreateAShr(Ops.LHS, RHS, "shr");
1206}
1207
1208Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
1209                                      unsigned SICmpOpc, unsigned FCmpOpc) {
1210  TestAndClearIgnoreResultAssign();
1211  Value *Result;
1212  QualType LHSTy = E->getLHS()->getType();
1213  if (!LHSTy->isAnyComplexType()) {
1214    Value *LHS = Visit(E->getLHS());
1215    Value *RHS = Visit(E->getRHS());
1216
1217    if (LHS->getType()->isFloatingPoint()) {
1218      Result = Builder.CreateFCmp((llvm::CmpInst::Predicate)FCmpOpc,
1219                                  LHS, RHS, "cmp");
1220    } else if (LHSTy->isSignedIntegerType()) {
1221      Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)SICmpOpc,
1222                                  LHS, RHS, "cmp");
1223    } else {
1224      // Unsigned integers and pointers.
1225      Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
1226                                  LHS, RHS, "cmp");
1227    }
1228
1229    // If this is a vector comparison, sign extend the result to the appropriate
1230    // vector integer type and return it (don't convert to bool).
1231    if (LHSTy->isVectorType())
1232      return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
1233
1234  } else {
1235    // Complex Comparison: can only be an equality comparison.
1236    CodeGenFunction::ComplexPairTy LHS = CGF.EmitComplexExpr(E->getLHS());
1237    CodeGenFunction::ComplexPairTy RHS = CGF.EmitComplexExpr(E->getRHS());
1238
1239    QualType CETy = LHSTy->getAsComplexType()->getElementType();
1240
1241    Value *ResultR, *ResultI;
1242    if (CETy->isRealFloatingType()) {
1243      ResultR = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc,
1244                                   LHS.first, RHS.first, "cmp.r");
1245      ResultI = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc,
1246                                   LHS.second, RHS.second, "cmp.i");
1247    } else {
1248      // Complex comparisons can only be equality comparisons.  As such, signed
1249      // and unsigned opcodes are the same.
1250      ResultR = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
1251                                   LHS.first, RHS.first, "cmp.r");
1252      ResultI = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
1253                                   LHS.second, RHS.second, "cmp.i");
1254    }
1255
1256    if (E->getOpcode() == BinaryOperator::EQ) {
1257      Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
1258    } else {
1259      assert(E->getOpcode() == BinaryOperator::NE &&
1260             "Complex comparison other than == or != ?");
1261      Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
1262    }
1263  }
1264
1265  return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType());
1266}
1267
1268Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
1269  bool Ignore = TestAndClearIgnoreResultAssign();
1270
1271  // __block variables need to have the rhs evaluated first, plus this should
1272  // improve codegen just a little.
1273  Value *RHS = Visit(E->getRHS());
1274  LValue LHS = EmitLValue(E->getLHS());
1275
1276  // Store the value into the LHS.  Bit-fields are handled specially
1277  // because the result is altered by the store, i.e., [C99 6.5.16p1]
1278  // 'An assignment expression has the value of the left operand after
1279  // the assignment...'.
1280  if (LHS.isBitfield()) {
1281    if (!LHS.isVolatileQualified()) {
1282      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, E->getType(),
1283                                         &RHS);
1284      return RHS;
1285    } else
1286      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, E->getType());
1287  } else
1288    CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS, E->getType());
1289  if (Ignore)
1290    return 0;
1291  return EmitLoadOfLValue(LHS, E->getType());
1292}
1293
1294Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
1295  // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
1296  // If we have 1 && X, just emit X without inserting the control flow.
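  // E.g. for "0 && f()" no call to f is emitted at all, unless the RHS
  // contains a label that could be jumped to (see the ContainsLabel check
  // below).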
1297  if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getLHS())) {
1298    if (Cond == 1) { // If we have 1 && X, just emit X.
1299      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
1300      // ZExt result to int.
1301      return Builder.CreateZExt(RHSCond, CGF.LLVMIntTy, "land.ext");
1302    }
1303
1304    // 0 && RHS: If it is safe, just elide the RHS, and return 0.
1305    if (!CGF.ContainsLabel(E->getRHS()))
1306      return VMContext.getNullValue(CGF.LLVMIntTy);
1307  }
1308
1309  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
1310  llvm::BasicBlock *RHSBlock  = CGF.createBasicBlock("land.rhs");
1311
1312  // Branch on the LHS first.  If it is false, go to the failure (cont) block.
1313  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock);
1314
1315  // Any edges into the ContBlock so far come from an (indeterminate number of)
1316  // branches on this first condition.  All of these values will be false.  Start
1317  // setting up the PHI node in the Cont Block for this.
1318  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::Int1Ty, "", ContBlock);
1319  PN->reserveOperandSpace(2);  // Normal case, two inputs.
1320  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
1321       PI != PE; ++PI)
1322    PN->addIncoming(VMContext.getConstantIntFalse(), *PI);
1323
1324  CGF.PushConditionalTempDestruction();
1325  CGF.EmitBlock(RHSBlock);
1326  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
1327  CGF.PopConditionalTempDestruction();
1328
1329  // Reacquire the RHS block, as there may be subblocks inserted.
1330  RHSBlock = Builder.GetInsertBlock();
1331
1332  // Emit an unconditional branch from this block to ContBlock.  Insert an entry
1333  // into the phi node for the edge with the value of RHSCond.
1334  CGF.EmitBlock(ContBlock);
1335  PN->addIncoming(RHSCond, RHSBlock);
1336
1337  // ZExt result to int.
1338  return Builder.CreateZExt(PN, CGF.LLVMIntTy, "land.ext");
1339}
1340
1341Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
1342  // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
1343  // If we have 0 || X, just emit X without inserting the control flow.
1344  if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getLHS())) {
1345    if (Cond == -1) { // If we have 0 || X, just emit X.
1346      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
1347      // ZExt result to int.
1348      return Builder.CreateZExt(RHSCond, CGF.LLVMIntTy, "lor.ext");
1349    }
1350
1351    // 1 || RHS: If it is safe, just elide the RHS, and return 1.
1352    if (!CGF.ContainsLabel(E->getRHS()))
1353      return VMContext.getConstantInt(CGF.LLVMIntTy, 1);
1354  }
1355
1356  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
1357  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
1358
1359  // Branch on the LHS first.  If it is true, go to the success (cont) block.
1360  CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock);
1361
1362  // Any edges into the ContBlock at this point come from the evaluation of
1363  // the first condition; there may be several, and each one carries the value
1364  // true.  Start setting up the PHI node in the ContBlock for this.
1365  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::Int1Ty, "", ContBlock);
1366  PN->reserveOperandSpace(2);  // Normal case, two inputs.
1367  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
1368       PI != PE; ++PI)
1369    PN->addIncoming(VMContext.getConstantIntTrue(), *PI);
1370
1371  CGF.PushConditionalTempDestruction();
1372
1373  // Emit the RHS condition as a bool value.
1374  CGF.EmitBlock(RHSBlock);
1375  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
1376
1377  CGF.PopConditionalTempDestruction();
1378
1379  // Reacquire the RHS block, as there may be subblocks inserted.
1380  RHSBlock = Builder.GetInsertBlock();
1381
1382  // Emit an unconditional branch from this block to ContBlock.  Insert an entry
1383  // into the phi node for the edge with the value of RHSCond.
1384  CGF.EmitBlock(ContBlock);
1385  PN->addIncoming(RHSCond, RHSBlock);
1386
1387  // ZExt result to int.
1388  return Builder.CreateZExt(PN, CGF.LLVMIntTy, "lor.ext");
1389}
1390
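// Illustrative note: for "(f(), g())" the visitor below emits f() purely for
// its side effects and returns the value of g() as the value of the whole
// comma expression.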
1391Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
1392  CGF.EmitStmt(E->getLHS());
1393  CGF.EnsureInsertPoint();
1394  return Visit(E->getRHS());
1395}
1396
1397//===----------------------------------------------------------------------===//
1398//                             Other Operators
1399//===----------------------------------------------------------------------===//
1400
1401/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
1402/// expression is cheap enough and side-effect-free enough to evaluate
1403/// unconditionally instead of conditionally.  This is used to convert control
1404/// flow into selects in some cases.
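/// For example (illustrative sketch): "flag ? 4 : 5" can be lowered to
///   %tobool = icmp ne i32 %flag, 0
///   %cond   = select i1 %tobool, i32 4, i32 5
/// instead of two basic blocks joined by a phi.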
1405static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E) {
1406  if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
1407    return isCheapEnoughToEvaluateUnconditionally(PE->getSubExpr());
1408
1409  // TODO: Allow anything we can constant fold to an integer or fp constant.
1410  if (isa<IntegerLiteral>(E) || isa<CharacterLiteral>(E) ||
1411      isa<FloatingLiteral>(E))
1412    return true;
1413
1414  // Non-volatile automatic variables too, to get "cond ? X : Y" where
1415  // X and Y are local variables.
1416  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
1417    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
1418      if (VD->hasLocalStorage() && !VD->getType().isVolatileQualified())
1419        return true;
1420
1421  return false;
1422}
1423
1424
1425Value *ScalarExprEmitter::
1426VisitConditionalOperator(const ConditionalOperator *E) {
1427  TestAndClearIgnoreResultAssign();
1428  // If the condition constant folds, try to avoid emitting both the condition
1429  // and the dead arm.
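  // Illustrative example: in "sizeof(int) == 4 ? f() : g()" the condition is an
  // integer constant expression, so only the live arm is emitted and no branch
  // is generated (provided the dead arm contains no labels we might jump to).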
1430  if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getCond())){
1431    Expr *Live = E->getLHS(), *Dead = E->getRHS();
1432    if (Cond == -1)
1433      std::swap(Live, Dead);
1434
1435    // If the Dead side doesn't contain labels we need, and if the Live side
1436    // isn't the GNU missing-operand ?: extension (which we could handle, but
1437    // don't bother to), just emit the Live part.
1438    if ((!Dead || !CGF.ContainsLabel(Dead)) &&  // No labels in dead part
1439        Live)                                   // Live part isn't missing.
1440      return Visit(Live);
1441  }
1442
1443
1444  // If this is a really simple expression (like x ? 4 : 5), emit this as a
1445  // select instead of as control flow.  We can only do this if it is cheap and
1446  // safe to evaluate the LHS and RHS unconditionally.
1447  if (E->getLHS() && isCheapEnoughToEvaluateUnconditionally(E->getLHS()) &&
1448      isCheapEnoughToEvaluateUnconditionally(E->getRHS())) {
1449    llvm::Value *CondV = CGF.EvaluateExprAsBool(E->getCond());
1450    llvm::Value *LHS = Visit(E->getLHS());
1451    llvm::Value *RHS = Visit(E->getRHS());
1452    return Builder.CreateSelect(CondV, LHS, RHS, "cond");
1453  }
1454
1455
1456  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
1457  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
1458  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
1459  Value *CondVal = 0;
1460
1461  // If we don't have the GNU missing condition extension, emit a branch on
1462  // bool the normal way.
1463  if (E->getLHS()) {
1464    // Use EmitBranchOnBoolExpr to get small and simple code for the branch
1465    // on bool.
1466    CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
1467  } else {
1468    // Otherwise, for the ?: extension, evaluate the conditional and then
1469    // convert it to bool the hard way.  We do this explicitly because we need
1470    // the unconverted value for the missing middle value of the ?:.
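    // Illustrative example: "p ?: q" behaves like "p ? p : q" except that p is
    // evaluated only once; CondVal holds that single evaluation so it can be
    // reused as the missing middle operand below.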
1471    CondVal = CGF.EmitScalarExpr(E->getCond());
1472
1473    // In some cases, EmitScalarConversion will delete the "CondVal" expression
1474    // if there are no extra uses (an optimization).  Inhibit this by making an
1475    // extra dead use, because we're going to add a use of CondVal later.  We
1476    // don't use the builder for this, because we don't want it to get optimized
1477    // away.  This leaves dead code, but the ?: extension isn't common.
1478    new llvm::BitCastInst(CondVal, CondVal->getType(), "dummy?:holder",
1479                          Builder.GetInsertBlock());
1480
1481    Value *CondBoolVal =
1482      CGF.EmitScalarConversion(CondVal, E->getCond()->getType(),
1483                               CGF.getContext().BoolTy);
1484    Builder.CreateCondBr(CondBoolVal, LHSBlock, RHSBlock);
1485  }
1486
1487  CGF.PushConditionalTempDestruction();
1488  CGF.EmitBlock(LHSBlock);
1489
1490  // Handle the GNU extension for missing LHS.
1491  Value *LHS;
1492  if (E->getLHS())
1493    LHS = Visit(E->getLHS());
1494  else    // Perform promotions, to handle cases like "short ?: int"
1495    LHS = EmitScalarConversion(CondVal, E->getCond()->getType(), E->getType());
1496
1497  CGF.PopConditionalTempDestruction();
1498  LHSBlock = Builder.GetInsertBlock();
1499  CGF.EmitBranch(ContBlock);
1500
1501  CGF.PushConditionalTempDestruction();
1502  CGF.EmitBlock(RHSBlock);
1503
1504  Value *RHS = Visit(E->getRHS());
1505  CGF.PopConditionalTempDestruction();
1506  RHSBlock = Builder.GetInsertBlock();
1507  CGF.EmitBranch(ContBlock);
1508
1509  CGF.EmitBlock(ContBlock);
1510
1511  if (!LHS || !RHS) {
1512    assert(E->getType()->isVoidType() && "Non-void ?: should have a value");
1513    return 0;
1514  }
1515
1516  // Create a PHI node for the result value.
1517  llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), "cond");
1518  PN->reserveOperandSpace(2);
1519  PN->addIncoming(LHS, LHSBlock);
1520  PN->addIncoming(RHS, RHSBlock);
1521  return PN;
1522}
1523
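// Illustrative note: __builtin_choose_expr(C, a, b) is resolved at compile
// time, so the visitor below simply emits whichever operand was chosen; no
// code is generated for the unchosen operand.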
1524Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
1525  return Visit(E->getChosenSubExpr(CGF.getContext()));
1526}
1527
1528Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
1529  llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
1530  llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());
1531
1532  // If EmitVAArg fails, we fall back to the LLVM instruction.
1533  if (!ArgPtr)
1534    return Builder.CreateVAArg(ArgValue, ConvertType(VE->getType()));
1535
1536  // FIXME: Volatility.
1537  return Builder.CreateLoad(ArgPtr);
1538}
1539
1540Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *BE) {
1541  return CGF.BuildBlockLiteralTmp(BE);
1542}
1543
1544//===----------------------------------------------------------------------===//
1545//                         Entry Point into this File
1546//===----------------------------------------------------------------------===//
1547
1548/// EmitScalarExpr - Emit the computation of the specified expression of
1549/// scalar type, returning the result as an LLVM scalar value.
1550Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
1551  assert(E && !hasAggregateLLVMType(E->getType()) &&
1552         "Invalid scalar expression to emit");
1553
1554  return ScalarExprEmitter(*this, IgnoreResultAssign)
1555    .Visit(const_cast<Expr*>(E));
1556}
1557
1558/// EmitScalarConversion - Emit a conversion from the specified type to the
1559/// specified destination type, both of which are LLVM scalar types.
1560Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
1561                                             QualType DstTy) {
1562  assert(!hasAggregateLLVMType(SrcTy) && !hasAggregateLLVMType(DstTy) &&
1563         "Invalid scalar expression to emit");
1564  return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy);
1565}
1566
1567/// EmitComplexToScalarConversion - Emit a conversion from the specified
1568/// complex type to the specified destination type, where the destination
1569/// type is an LLVM scalar type.
1570Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
1571                                                      QualType SrcTy,
1572                                                      QualType DstTy) {
1573  assert(SrcTy->isAnyComplexType() && !hasAggregateLLVMType(DstTy) &&
1574         "Invalid complex -> scalar conversion");
1575  return ScalarExprEmitter(*this).EmitComplexToScalarConversion(Src, SrcTy,
1576                                                                DstTy);
1577}
1578
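// Illustrative usage (hypothetical call site): for two <4 x i32> operands,
// EmitShuffleVector(A, B, 0, 4, 1, 5, "lo") interleaves the low halves.
// Indices 0..N-1 select from V1, indices N..2N-1 select from V2, and the
// trailing string names the resulting shufflevector instruction.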
1579Value *CodeGenFunction::EmitShuffleVector(Value* V1, Value *V2, ...) {
1580  assert(V1->getType() == V2->getType() &&
1581         "Vector operands must be of the same type");
1582  unsigned NumElements =
1583    cast<llvm::VectorType>(V1->getType())->getNumElements();
1584
1585  va_list va;
1586  va_start(va, V2);
1587
1588  llvm::SmallVector<llvm::Constant*, 16> Args;
1589  for (unsigned i = 0; i < NumElements; i++) {
1590    int n = va_arg(va, int);
1591    assert(n >= 0 && n < (int)NumElements * 2 &&
1592           "Vector shuffle index out of bounds!");
1593    Args.push_back(VMContext.getConstantInt(llvm::Type::Int32Ty, n));
1594  }
1595
1596  const char *Name = va_arg(va, const char *);
1597  va_end(va);
1598
1599  llvm::Constant *Mask = VMContext.getConstantVector(&Args[0], NumElements);
1600
1601  return Builder.CreateShuffleVector(V1, V2, Mask, Name);
1602}
1603
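// Illustrative note: with isSplat == true, EmitVector replicates Vals[0] into
// every lane (e.g. four insertelement instructions building the vector
// <v0, v0, v0, v0> from an undef value); with isSplat == false, lane i
// receives Vals[i].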
1604llvm::Value *CodeGenFunction::EmitVector(llvm::Value * const *Vals,
1605                                         unsigned NumVals, bool isSplat) {
1606  llvm::Value *Vec
1607    = VMContext.getUndef(VMContext.getVectorType(Vals[0]->getType(), NumVals));
1608
1609  for (unsigned i = 0, e = NumVals; i != e; ++i) {
1610    llvm::Value *Val = isSplat ? Vals[0] : Vals[i];
1611    llvm::Value *Idx = VMContext.getConstantInt(llvm::Type::Int32Ty, i);
1612    Vec = Builder.CreateInsertElement(Vec, Val, Idx, "tmp");
1613  }
1614
1615  return Vec;
1616}
1617